| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 54.1k chars) | int64 (0 – 699) | string (111 – 35.6k chars) | int64 (0 – 699) | int64 (0 – 1) |
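Each row below is flattened: a `code` cell, its `code_codestyle` id, a `style_context` cell, its `style_context_codestyle` id, and the binary `label`, with the numeric cells appearing as `| 691 |`-style fragments between the code snippets. A minimal sketch of materializing rows with this schema via the `datasets` library; the snippet texts and ids are placeholders, not values taken from this dump:

```python
from datasets import Dataset

# Hypothetical two-row sample mirroring the schema above.
rows = {
    "code": ["def f():\n    return 1", "def g():\n    return 2"],
    "code_codestyle": [691, 691],
    "style_context": ["def h():\n    return 3", "def k():\n    return 4"],
    "style_context_codestyle": [691, 42],
    "label": [1, 0],  # 1 -> both snippets share the same code style
}

ds = Dataset.from_dict(rows)
print(ds.features)
```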
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before
            SoftMax or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.
"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria

| 691 |
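A minimal usage sketch for the criteria above, assuming the `bool`-returning interface shown in this file (newer `transformers` releases return per-batch tensors instead); the toy generation loop is illustrative:

```python
import torch
from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

# Stop when either 20 tokens are reached or 2 seconds have elapsed.
criteria = StoppingCriteriaList([
    MaxLengthCriteria(max_length=20),
    MaxTimeCriteria(max_time=2.0),
])

input_ids = torch.ones((1, 5), dtype=torch.long)
scores = torch.zeros((1, 100))  # dummy logits; both criteria ignore them

while not criteria(input_ids, scores):
    next_token = torch.zeros((1, 1), dtype=torch.long)  # stand-in for sampling
    input_ids = torch.cat([input_ids, next_token], dim=-1)

print(input_ids.shape[-1])  # 20
```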
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # integer halving keeps n an int
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10  # b divides a exactly, so the float division below is exact

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# using Python operators:
print((a / b) % p == (a * b ** (p - 2)) % p)

| 691 | 1 |
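Both checks rely on Fermat's little theorem: for a prime p and b not divisible by p, b^(p-2) ≡ b^(-1) (mod p). Python's built-in three-argument `pow` computes the same modular exponentiation and makes a handy cross-check:

```python
p = 701
b = 10
inverse_via_pow = pow(b, p - 2, p)  # built-in modular exponentiation
assert inverse_via_pow == binary_exponentiation(b, p - 2, p)
assert (b * inverse_via_pow) % p == 1  # definition of a modular inverse
```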
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 691 |
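As a worked example, an apparent power of 100 VA at power factor 0.8 splits into 80 W real and 60 var reactive power (a 3-4-5 power triangle); using the function names restored above:

```python
assert real_power(100, 0.8) == 80.0
assert round(reactive_power(100, 0.8), 9) == 60.0  # 100 * sqrt(1 - 0.64)
```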
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )

| 691 | 1 |
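A minimal inference sketch for the model family under test; the `google/bit-50` checkpoint name is an assumption (any BiT checkpoint on the Hub works):

```python
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")

image = Image.new("RGB", (224, 224))  # stand-in for a real photo
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(-1).item())  # predicted ImageNet class id
```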
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case_ : str = logging.get_logger(__name__)
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = ["pixel_values"]
def __init__( self : int , _a : bool = True , _a : Dict[str, int] = None , _a : int = 0.9 , _a : PILImageResampling = PILImageResampling.BICUBIC , _a : bool = True , _a : Dict[str, int] = None , _a : Union[int, float] = 1 / 255 , _a : bool = True , _a : bool = True , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[float, List[float]]] = None , **_a : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_a )
_SCREAMING_SNAKE_CASE =size if size is not None else {'''shortest_edge''': 224}
_SCREAMING_SNAKE_CASE =get_size_dict(_a , default_to_square=_a )
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =get_size_dict(_a , param_name='''crop_size''' )
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =crop_pct
_SCREAMING_SNAKE_CASE =resample
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_rescale
_SCREAMING_SNAKE_CASE =rescale_factor
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_SCREAMING_SNAKE_CASE =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase ( self : List[Any] , _a : np.ndarray , _a : Dict[str, int] , _a : Optional[float] = None , _a : PILImageResampling = PILImageResampling.BICUBIC , _a : Optional[Union[str, ChannelDimension]] = None , **_a : List[Any] , ) -> np.ndarray:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =get_size_dict(_a , default_to_square=_a )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
_SCREAMING_SNAKE_CASE =int(size['''shortest_edge'''] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
_SCREAMING_SNAKE_CASE =int(size['''height'''] / crop_pct )
else:
_SCREAMING_SNAKE_CASE =(int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct ))
else:
raise ValueError('''Invalid size for resize: {}'''.format(_a ) )
_SCREAMING_SNAKE_CASE =get_resize_output_image_size(_a , size=_a , default_to_square=_a )
else:
if "shortest_edge" in size:
_SCREAMING_SNAKE_CASE =get_resize_output_image_size(_a , size=size['''shortest_edge'''] , default_to_square=_a )
elif "height" in size and "width" in size:
_SCREAMING_SNAKE_CASE =(size['''height'''], size['''width'''])
else:
raise ValueError('''Invalid size for resize: {}'''.format(_a ) )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def __UpperCamelCase ( self : Dict , _a : np.ndarray , _a : Dict[str, int] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Optional[Any] , ) -> np.ndarray:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def __UpperCamelCase ( self : Union[str, Any] , _a : np.ndarray , _a : Union[int, float] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Tuple , ) -> Tuple:
"""simple docstring"""
return rescale(_a , scale=_a , data_format=_a , **_a )
def __UpperCamelCase ( self : List[Any] , _a : np.ndarray , _a : Union[float, List[float]] , _a : Union[float, List[float]] , _a : Optional[Union[str, ChannelDimension]] = None , **_a : Any , ) -> np.ndarray:
"""simple docstring"""
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def __UpperCamelCase ( self : Tuple , _a : ImageInput , _a : bool = None , _a : Dict[str, int] = None , _a : int = None , _a : PILImageResampling = None , _a : bool = None , _a : Dict[str, int] = None , _a : bool = None , _a : float = None , _a : bool = None , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[float, List[float]]] = None , _a : Optional[Union[str, TensorType]] = None , _a : ChannelDimension = ChannelDimension.FIRST , **_a : str , ) -> PIL.Image.Image:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE =crop_pct if crop_pct is not None else self.crop_pct
_SCREAMING_SNAKE_CASE =resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE =do_center_crop if do_center_crop is not None else self.do_center_crop
_SCREAMING_SNAKE_CASE =do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE =rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE =do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE =image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE =image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE =size if size is not None else self.size
_SCREAMING_SNAKE_CASE =get_size_dict(_a , default_to_square=_a )
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else self.crop_size
_SCREAMING_SNAKE_CASE =get_size_dict(_a , param_name='''crop_size''' )
_SCREAMING_SNAKE_CASE =make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_pct is None:
raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE =[to_numpy_array(_a ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE =[self.resize(image=_a , size=_a , crop_pct=_a , resample=_a ) for image in images]
if do_center_crop:
_SCREAMING_SNAKE_CASE =[self.center_crop(image=_a , size=_a ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE =[self.rescale(image=_a , scale=_a ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE =[self.normalize(image=_a , mean=_a , std=_a ) for image in images]
_SCREAMING_SNAKE_CASE =[to_channel_dimension_format(_a , _a ) for image in images]
_SCREAMING_SNAKE_CASE ={'''pixel_values''': images}
return BatchFeature(data=_a , tensor_type=_a )

| 691 |
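The `crop_pct` branch of the resize step can be illustrated numerically (a standalone sketch, not library code): the image is first resized to a larger short side, then center-cropped back down, mirroring timm-style evaluation crops:

```python
# For shortest_edge=224 and crop_pct=0.9, resize the short side to
# int(224 / 0.9) = 248 first, then center-crop back to 224.
size = {"shortest_edge": 224}
crop_pct = 0.9
resize_short_side = int(size["shortest_edge"] / crop_pct)
print(resize_short_side)  # 248
```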
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output

| 691 | 1 |
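A minimal sketch of composing the config from sub-configs via the classmethod near the end of the file; the DPR/BART sub-model choices are illustrative:

```python
from transformers import BartConfig, DPRConfig, RagConfig

question_encoder_config = DPRConfig()
generator_config = BartConfig()

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5
)
print(rag_config.n_docs)  # 5
```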
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ProphetNetTokenizer
UpperCAmelCase = False
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __UpperCamelCase ( self : Tuple , _a : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''UNwant\u00E9d,running'''
_SCREAMING_SNAKE_CASE ='''unwanted, running'''
return input_text, output_text
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [9, 6, 7, 12, 10, 11] )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_SCREAMING_SNAKE_CASE ={}
for i, token in enumerate(_a ):
_SCREAMING_SNAKE_CASE =i
_SCREAMING_SNAKE_CASE =WordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
_SCREAMING_SNAKE_CASE =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_SCREAMING_SNAKE_CASE =[1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
_SCREAMING_SNAKE_CASE =tokenizer(_a , padding=_a , return_tensors='''pt''' )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_a , _a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
_SCREAMING_SNAKE_CASE =tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
_SCREAMING_SNAKE_CASE =tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
_SCREAMING_SNAKE_CASE =tokenizer.build_inputs_with_special_tokens(_a )
_SCREAMING_SNAKE_CASE =tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]

| 691 |
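The WordPiece behavior exercised above (greedy longest-match-first with `##` continuation pieces) can be sketched in a few lines; this is a hypothetical standalone reimplementation, not the library code:

```python
def wordpiece(word, vocab, unk="[UNK]"):
    # Greedy longest-match-first, as in BERT/ProphetNet WordPiece.
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = ("##" if start > 0 else "") + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return [unk]  # no sub-piece matched at this position
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", vocab))  # ['un', '##want', '##ed']
```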
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait()

| 691 | 1 |
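A pared-down sketch of the same Manim pattern (a row of memory cells with a label), renderable with `manim -pql file.py MemoryBlock`; the scene name is made up:

```python
from manim import DOWN, RIGHT, Group, Rectangle, Scene, Text, VGroup, Write

class MemoryBlock(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        cells = VGroup(*[mem.copy() for _ in range(6)]).arrange(RIGHT, buff=0)
        label = Text("CPU", font_size=24)
        Group(cells, label).arrange(DOWN, buff=0.5)
        self.add(cells)
        self.play(Write(label), run_time=1)
        self.wait()
```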
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from subcommand help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage

| 691 |
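A quick sketch of how the converters behave, fed programmatically instead of through `input()`; the enum reprs in the comments assume `accelerate`'s dataclasses and the function names restored above:

```python
assert _convert_yes_no_to_bool("Yes") is True
assert _convert_yes_no_to_bool("no") is False
print(_convert_mixed_precision(1))  # PrecisionType.FP16 ("fp16")
print(_convert_dynamo_backend(2))   # "INDUCTOR"
```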
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids

| 691 | 1 |
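Usage sketch: unlike most tokenizers, `build_inputs_with_special_tokens` above only appends `</s>`, so every encoded sequence ends with the EOS id (checkpoint name taken from the map earlier in the file):

```python
from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok("Hello there").input_ids
assert ids[-1] == tok.eos_token_id  # Blenderbot appends </s> only
print(tok.decode(ids))
```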
import sys


def matrix_chain_order(array):
    n = len(array)
    # matrix[a][b] holds the minimum multiplication cost for the chain a..b,
    # sol[a][b] the split point that achieves it.
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()

| 691 |
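For the sample dimensions `[30, 35, 15, 5, 10, 20, 25]` (the classic CLRS six-matrix chain) the optimum is 15125 scalar multiplications; a quick check against the function above:

```python
matrix, sol = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
assert matrix[1][6] == 15125  # known optimum for this chain (CLRS 15.2)
```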
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
snake_case_ : Optional[Any] = logging.get_logger('''transformers.models.encodec''')
snake_case_ : Any = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
snake_case_ : str = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
snake_case_ : Optional[int] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
snake_case_ : Union[str, Any] = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
snake_case_ : Optional[Any] = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
snake_case_ : Any = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
snake_case_ : Union[str, Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
snake_case_ : Union[str, Any] = []
snake_case_ : int = []
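# Recursively resolve the dotted `key` on the HF model, check that the shapes match,
# and copy `value` into the right parameter slot (conv weights/biases, weight-norm
# g/v components, LSTM gate weights for both layers, or batch-norm statistics).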
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
for attribute in key.split('''.'''):
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__)
if weight_type is not None:
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__).shape
else:
_SCREAMING_SNAKE_CASE =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_g":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_v":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "running_mean":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "running_var":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "num_batches_tracked":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_ih_l0":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_hh_l0":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias_ih_l0":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias_hh_l0":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_ih_l1":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_hh_l1":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias_ih_l1":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias_hh_l1":
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def lowerCamelCase( a__ ,a__):
for key in ignore_keys:
if key.endswith('''.*'''):
if name.startswith(key[:-1]):
return True
elif ".*." in key:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =key.split('''.*.''')
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
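# Walk the original state dict, translate each fairseq-style key to its HF name via
# the MAPPING tables above, infer the weight type from the key, and load the tensor;
# anything that matches no mapping is collected and reported as an unused weight.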
def lowerCamelCase( a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =[]
if model_name == "encodec_24khz" or "encodec_32khz":
_SCREAMING_SNAKE_CASE =MAPPING_24K
elif model_name == "encodec_48khz":
_SCREAMING_SNAKE_CASE =MAPPING_48K
else:
raise ValueError(f"Unsupported model: {model_name}")
for name, value in orig_dict.items():
if should_ignore(a__ ,a__):
logger.info(f"{name} was ignored")
continue
_SCREAMING_SNAKE_CASE =False
for key, mapped_key in MAPPING.items():
if "*" in key:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =key.split('''.*.''')
if prefix in name and suffix in name:
_SCREAMING_SNAKE_CASE =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''') and name.endswith('''embed_avg'''):
continue
_SCREAMING_SNAKE_CASE =True
if "*" in mapped_key:
_SCREAMING_SNAKE_CASE =name.split(a__)[0].split('''.''')[-2]
_SCREAMING_SNAKE_CASE =mapped_key.replace('''*''' ,a__)
if "weight_g" in name:
_SCREAMING_SNAKE_CASE ='''weight_g'''
elif "weight_v" in name:
_SCREAMING_SNAKE_CASE ='''weight_v'''
elif "weight_ih_l0" in name:
_SCREAMING_SNAKE_CASE ='''weight_ih_l0'''
elif "weight_hh_l0" in name:
_SCREAMING_SNAKE_CASE ='''weight_hh_l0'''
elif "bias_ih_l0" in name:
_SCREAMING_SNAKE_CASE ='''bias_ih_l0'''
elif "bias_hh_l0" in name:
_SCREAMING_SNAKE_CASE ='''bias_hh_l0'''
elif "weight_ih_l1" in name:
_SCREAMING_SNAKE_CASE ='''weight_ih_l1'''
elif "weight_hh_l1" in name:
_SCREAMING_SNAKE_CASE ='''weight_hh_l1'''
elif "bias_ih_l1" in name:
_SCREAMING_SNAKE_CASE ='''bias_ih_l1'''
elif "bias_hh_l1" in name:
_SCREAMING_SNAKE_CASE ='''bias_hh_l1'''
elif "bias" in name:
_SCREAMING_SNAKE_CASE ='''bias'''
elif "weight" in name:
_SCREAMING_SNAKE_CASE ='''weight'''
elif "running_mean" in name:
_SCREAMING_SNAKE_CASE ='''running_mean'''
elif "running_var" in name:
_SCREAMING_SNAKE_CASE ='''running_var'''
elif "num_batches_tracked" in name:
_SCREAMING_SNAKE_CASE ='''num_batches_tracked'''
else:
_SCREAMING_SNAKE_CASE =None
set_recursively(a__ ,a__ ,a__ ,a__ ,a__)
continue
if not is_used:
unused_weights.append(a__)
logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def lowerCamelCase( a__ ,a__ ,a__ ,a__=None ,a__=None ,):
if config_path is not None:
_SCREAMING_SNAKE_CASE =EncodecConfig.from_pretrained(a__)
else:
_SCREAMING_SNAKE_CASE =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_SCREAMING_SNAKE_CASE =[8, 5, 4, 4]
_SCREAMING_SNAKE_CASE =[2.2]
_SCREAMING_SNAKE_CASE =64
_SCREAMING_SNAKE_CASE =3_2000
_SCREAMING_SNAKE_CASE =2048
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
elif model_name == "encodec_48khz":
_SCREAMING_SNAKE_CASE =[8, 5, 4, 2]
_SCREAMING_SNAKE_CASE =[3.0, 6.0, 12.0, 24.0]
_SCREAMING_SNAKE_CASE =4_8000
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE ='''time_group_norm'''
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =1.0
_SCREAMING_SNAKE_CASE =0.01
else:
raise ValueError(f"Unknown model name: {model_name}")
_SCREAMING_SNAKE_CASE =EncodecModel(a__)
_SCREAMING_SNAKE_CASE =EncodecFeatureExtractor(
feature_size=config.audio_channels ,sampling_rate=config.sampling_rate ,chunk_length_s=config.chunk_length_s ,overlap=config.overlap ,)
feature_extractor.save_pretrained(a__)
_SCREAMING_SNAKE_CASE =torch.load(a__)
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_SCREAMING_SNAKE_CASE =original_checkpoint['''best_state''']
recursively_load_weights(a__ ,a__ ,a__)
model.save_pretrained(a__)
if repo_id:
print('''Pushing to the hub...''')
feature_extractor.push_to_hub(a__)
model.push_to_hub(a__)
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
snake_case_ : Optional[Any] = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
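# Render a complete README.md model card for one allenai wmt16 checkpoint, embedding
# sample translations, the fairseq-vs-transformers BLEU comparison, and usage
# instructions into a single f-string template.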
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
    write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
from __future__ import annotations
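# Enumerate every permutation of `sequence` with depth-first backtracking: at each
# level append one unused element, recurse, then pop it and clear its used flag.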
def lowerCamelCase( a__):
create_state_space_tree(a__ ,[] ,0 ,[0 for i in range(len(a__))])
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,):
if index == len(a__):
print(a__)
return
for i in range(len(a__)):
if not index_used[i]:
current_sequence.append(sequence[i])
_SCREAMING_SNAKE_CASE =True
create_state_space_tree(a__ ,a__ ,index + 1 ,a__)
current_sequence.pop()
_SCREAMING_SNAKE_CASE =False
snake_case_ : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
snake_case_ : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
from typing import TYPE_CHECKING
from ....utils import _LazyModule
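# Lazy module: TapexTokenizer is only imported on first attribute access, so that
# importing the package stays cheap.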
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =Path(_a ) / '''preprocessor_config.json'''
_SCREAMING_SNAKE_CASE =Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =Path(_a ) / '''preprocessor_config.json'''
_SCREAMING_SNAKE_CASE =Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =CLIPConfig()
            # Create a dummy config file with image_processor_type
_SCREAMING_SNAKE_CASE =Path(_a ) / '''preprocessor_config.json'''
_SCREAMING_SNAKE_CASE =Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_SCREAMING_SNAKE_CASE =CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_SCREAMING_SNAKE_CASE =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('''clip-base''' )
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE =Path(_a ) / '''preprocessor_config.json'''
_SCREAMING_SNAKE_CASE =Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_SCREAMING_SNAKE_CASE =CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
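# Decorator that measures one call of `func` with timeit's clock and returns the
# elapsed time in seconds instead of the function's own result.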
def lowerCamelCase( a__):
def wrapper(*a__ ,**a__):
_SCREAMING_SNAKE_CASE =timeit.default_timer()
_SCREAMING_SNAKE_CASE =func(*a__ ,**a__)
_SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime
return delta
_SCREAMING_SNAKE_CASE =func.__name__
return wrapper
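# Build `num_examples` random rows that conform to `features`: random arrays for
# _ArrayXD types, fixed strings or random ints for Value types, and random tensors
# of the requested shape for (possibly nested) Sequence types.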
def lowerCamelCase( a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
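# Write the generated examples to an Arrow file with ArrowWriter, verify the row
# count, and reload the file as a datasets.Dataset for benchmarking.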
def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
    return dataset
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class A__ :
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42 # [batch_size x 3]
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
UpperCAmelCase = 42
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
def __UpperCamelCase ( self : Optional[int] ) -> torch.Tensor:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =torch.arange(self.height * self.width )
_SCREAMING_SNAKE_CASE =torch.stack(
[
pixel_indices % self.width,
torch.div(_a , self.width , rounding_mode='''trunc''' ),
] , axis=1 , )
return coords
@property
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE =self.shape
_SCREAMING_SNAKE_CASE =int(np.prod(_a ) )
_SCREAMING_SNAKE_CASE =self.get_image_coords()
_SCREAMING_SNAKE_CASE =torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_SCREAMING_SNAKE_CASE =self.get_camera_rays(_a )
_SCREAMING_SNAKE_CASE =rays.view(_a , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def __UpperCamelCase ( self : Optional[int] , _a : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_SCREAMING_SNAKE_CASE =coords.view(_a , -1 , 2 )
_SCREAMING_SNAKE_CASE =self.resolution()
_SCREAMING_SNAKE_CASE =self.fov()
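        # Map pixel coordinates into [-1, 1], scale by tan(fov / 2) to get offsets on
        # the image plane, then mix the camera basis vectors into unit ray directions.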
_SCREAMING_SNAKE_CASE =(flat.float() / (res - 1)) * 2 - 1
_SCREAMING_SNAKE_CASE =fracs * torch.tan(fov / 2 )
_SCREAMING_SNAKE_CASE =fracs.view(_a , -1 , 2 )
_SCREAMING_SNAKE_CASE =(
self.z.view(_a , 1 , 3 )
+ self.x.view(_a , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(_a , 1 , 3 ) * fracs[:, :, 1:]
)
_SCREAMING_SNAKE_CASE =directions / directions.norm(dim=-1 , keepdim=_a )
_SCREAMING_SNAKE_CASE =torch.stack(
[
torch.broadcast_to(self.origin.view(_a , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(_a , *_a , 2 , 3 )
def __UpperCamelCase ( self : Tuple , _a : int , _a : int ) -> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=_a , height=_a , x_fov=self.x_fov , y_fov=self.y_fov , )
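# Build a ring of 20 cameras panning around the origin: each sits at radius 4,
# looks back toward the origin with a slight downward tilt, and shares a 0.7 rad
# field of view in both axes.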
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for theta in np.linspace(0 ,2 * np.pi ,num=20):
_SCREAMING_SNAKE_CASE =np.array([np.sin(a__), np.cos(a__), -0.5])
z /= np.sqrt(np.sum(z**2))
_SCREAMING_SNAKE_CASE =-z * 4
_SCREAMING_SNAKE_CASE =np.array([np.cos(a__), -np.sin(a__), 0.0])
_SCREAMING_SNAKE_CASE =np.cross(a__ ,a__)
origins.append(a__)
xs.append(a__)
ys.append(a__)
zs.append(a__)
return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(a__ ,axis=0)).float() ,x=torch.from_numpy(np.stack(a__ ,axis=0)).float() ,y=torch.from_numpy(np.stack(a__ ,axis=0)).float() ,z=torch.from_numpy(np.stack(a__ ,axis=0)).float() ,width=a__ ,height=a__ ,x_fov=0.7 ,y_fov=0.7 ,shape=(1, len(a__)) ,)
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
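# Encoder wrapper that exposes a single-layer forward pass so PABEE can run the
# transformer one layer at a time and decide after each layer whether to exit early.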
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
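            # Patience-based early exit: run layers one at a time and stop as soon as the
            # intermediate classifier's prediction has stayed unchanged (or, for regression,
            # within regression_threshold) for `patience` consecutive layers.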
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
        return outputs
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : Optional[Any] = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : str = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def __UpperCamelCase ( *_a : Union[str, Any] , **_a : str ) -> List[str]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class A__ ( unittest.TestCase ):
@require_torch
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_SCREAMING_SNAKE_CASE =image_classifier(_a , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating-point scores are so close that we run into floating-point error, so the order is not
        # guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(_a ) , [
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}],
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}],
] , )
_SCREAMING_SNAKE_CASE =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
] , )
@require_tf
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_SCREAMING_SNAKE_CASE =image_classifier(_a , candidate_labels=['''a''', '''b''', '''c'''] )
self.assertEqual(
nested_simplify(_a ) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , )
_SCREAMING_SNAKE_CASE =image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
[
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
{'''score''': 0.3_33, '''label''': ANY(_a )},
],
] , )
@slow
@require_torch
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_SCREAMING_SNAKE_CASE =image_classifier(_a , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_a ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
_SCREAMING_SNAKE_CASE =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
# This is an image of 2 cats with remotes and no planes
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
_SCREAMING_SNAKE_CASE =image_classifier(_a , candidate_labels=['''cat''', '''plane''', '''remote'''] )
self.assertEqual(
nested_simplify(_a ) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
_SCREAMING_SNAKE_CASE =image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
            * 5 , )
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] ) | 691 | 1 |
snake_case_ : int = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
} | 691 |
import requests
from bsa import BeautifulSoup
def lowerCamelCase( a__ = "https://www.worldometers.info/coronavirus"):
_SCREAMING_SNAKE_CASE =BeautifulSoup(requests.get(a__).text ,'''html.parser''')
_SCREAMING_SNAKE_CASE =soup.findAll('''h1''')
_SCREAMING_SNAKE_CASE =soup.findAll('''div''' ,{'''class''': '''maincounter-number'''})
keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''})
values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''})
return {key.text.strip(): value.text.strip() for key, value in zip(a__ ,a__)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""") | 691 | 1 |
from __future__ import annotations
def lowerCamelCase( a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =list(range(len(a__)))
_SCREAMING_SNAKE_CASE =[v / w for v, w in zip(a__ ,a__)]
index.sort(key=lambda a__: ratio[i] ,reverse=a__)
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =[0] * len(a__)
for i in index:
if weight[i] <= capacity:
_SCREAMING_SNAKE_CASE =1
max_value += value[i]
capacity -= weight[i]
else:
_SCREAMING_SNAKE_CASE =capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 |
def lowerCamelCase( a__ ,a__):
return number | (1 << position)
def lowerCamelCase( a__ ,a__):
return number & ~(1 << position)
def lowerCamelCase( a__ ,a__):
return number ^ (1 << position)
def lowerCamelCase( a__ ,a__):
return ((number >> position) & 1) == 1
def lowerCamelCase( a__ ,a__):
return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 1 |
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =word.split()
def justify(a__ ,a__ ,a__) -> str:
_SCREAMING_SNAKE_CASE =max_width - width
_SCREAMING_SNAKE_CASE =len(a__)
if len(a__) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_SCREAMING_SNAKE_CASE =words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_SCREAMING_SNAKE_CASE =spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_SCREAMING_SNAKE_CASE =(
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(a__):
num_spaces_between_words_list[i] += 1
_SCREAMING_SNAKE_CASE =[]
for i in range(a__):
# add the word
aligned_words_list.append(line[i])
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * ''' ''')
# just add the last word to the sentence
aligned_words_list.append(line[-1])
# join the aligned words list to form a justified line
return "".join(a__)
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =0
for word in words:
if width + len(a__) + len(a__) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(a__)
width += len(a__)
else:
# justify the line and add it to result
answer.append(justify(a__ ,a__ ,a__))
# reset new line and new width
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =[word], len(a__)
_SCREAMING_SNAKE_CASE =max_width - width - len(a__)
answer.append(''' '''.join(a__) + (remaining_spaces + 1) * ''' ''')
return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 691 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
snake_case_ : List[Any] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
snake_case_ : int = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
snake_case_ : Dict = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
UpperCAmelCase = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."} , )
UpperCAmelCase = field(default=UpperCamelCase__ , metadata={"help": "A folder containing the training data."} )
UpperCAmelCase = field(default=UpperCamelCase__ , metadata={"help": "A folder containing the validation data."} )
UpperCAmelCase = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
UpperCAmelCase = field(default=32 , metadata={"help": "The size of the square patches to use for masking."} )
UpperCAmelCase = field(
default=0.6 , metadata={"help": "Percentage of patches to mask."} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
if self.train_dir is not None:
_SCREAMING_SNAKE_CASE =self.train_dir
if self.validation_dir is not None:
_SCREAMING_SNAKE_CASE =self.validation_dir
_SCREAMING_SNAKE_CASE =data_files if data_files else None
@dataclass
class A__ :
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
"checkpoint identifier on the hub. "
"Don't set if you want to train a model from scratch."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase__ )} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"} , )
UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCAmelCase = field(default=UpperCamelCase__ , metadata={"help": "Name or path of preprocessor config."} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Stride to use for the encoder."} , )
class A__ :
def __init__( self : Any , _a : Union[str, Any]=192 , _a : Union[str, Any]=32 , _a : Tuple=4 , _a : int=0.6 ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =input_size
_SCREAMING_SNAKE_CASE =mask_patch_size
_SCREAMING_SNAKE_CASE =model_patch_size
_SCREAMING_SNAKE_CASE =mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError('''Input size must be divisible by mask patch size''' )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError('''Mask patch size must be divisible by model patch size''' )
_SCREAMING_SNAKE_CASE =self.input_size // self.mask_patch_size
_SCREAMING_SNAKE_CASE =self.mask_patch_size // self.model_patch_size
_SCREAMING_SNAKE_CASE =self.rand_size**2
_SCREAMING_SNAKE_CASE =int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.random.permutation(self.token_count )[: self.mask_count]
_SCREAMING_SNAKE_CASE =np.zeros(self.token_count , dtype=_a )
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =mask.reshape((self.rand_size, self.rand_size) )
_SCREAMING_SNAKE_CASE =mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =torch.stack([example['''pixel_values'''] for example in examples])
_SCREAMING_SNAKE_CASE =torch.stack([example['''mask'''] for example in examples])
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_mim''' ,a__ ,a__)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout)] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE =training_args.get_process_log_level()
logger.setLevel(a__)
transformers.utils.logging.set_verbosity(a__)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}")
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE =None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE =get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''')
# Initialize our dataset.
_SCREAMING_SNAKE_CASE =load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,data_files=data_args.data_files ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# If we don't have a validation split, split off a percentage of train as validation.
_SCREAMING_SNAKE_CASE =None if '''validation''' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,a__) and data_args.train_val_split > 0.0:
_SCREAMING_SNAKE_CASE =ds['''train'''].train_test_split(data_args.train_val_split)
_SCREAMING_SNAKE_CASE =split['''train''']
_SCREAMING_SNAKE_CASE =split['''test''']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE ={
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.config_name_or_path ,**a__)
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.model_name_or_path ,**a__)
else:
_SCREAMING_SNAKE_CASE =CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''')
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}")
config.update_from_string(model_args.config_overrides)
logger.info(f"New config: {config}")
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(a__ ,'''decoder_type'''):
_SCREAMING_SNAKE_CASE ='''simmim'''
# adapt config
_SCREAMING_SNAKE_CASE =model_args.image_size if model_args.image_size is not None else config.image_size
_SCREAMING_SNAKE_CASE =model_args.patch_size if model_args.patch_size is not None else config.patch_size
_SCREAMING_SNAKE_CASE =(
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
})
# create image processor
if model_args.image_processor_name:
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(model_args.image_processor_name ,**a__)
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(model_args.model_name_or_path ,**a__)
else:
_SCREAMING_SNAKE_CASE ={
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_SCREAMING_SNAKE_CASE =IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE =AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
logger.info('''Training new model from scratch''')
_SCREAMING_SNAKE_CASE =AutoModelForMaskedImageModeling.from_config(a__)
if training_args.do_train:
_SCREAMING_SNAKE_CASE =ds['''train'''].column_names
else:
_SCREAMING_SNAKE_CASE =ds['''validation'''].column_names
if data_args.image_column_name is not None:
_SCREAMING_SNAKE_CASE =data_args.image_column_name
elif "image" in column_names:
_SCREAMING_SNAKE_CASE ='''image'''
elif "img" in column_names:
_SCREAMING_SNAKE_CASE ='''img'''
else:
_SCREAMING_SNAKE_CASE =column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_SCREAMING_SNAKE_CASE =Compose(
[
Lambda(lambda a__: img.convert('''RGB''') if img.mode != "RGB" else img),
RandomResizedCrop(model_args.image_size ,scale=(0.67, 1.0) ,ratio=(3.0 / 4.0, 4.0 / 3.0)),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean ,std=image_processor.image_std),
])
# create mask generator
_SCREAMING_SNAKE_CASE =MaskGenerator(
input_size=model_args.image_size ,mask_patch_size=data_args.mask_patch_size ,model_patch_size=model_args.patch_size ,mask_ratio=data_args.mask_ratio ,)
def preprocess_images(a__):
_SCREAMING_SNAKE_CASE =[transforms(a__) for image in examples[image_column_name]]
_SCREAMING_SNAKE_CASE =[mask_generator() for i in range(len(examples[image_column_name]))]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''')
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE =ds['''train'''].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(a__)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''')
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE =(
ds['''validation'''].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(a__)
# Initialize our trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=ds['''train'''] if training_args.do_train else None ,eval_dataset=ds['''validation'''] if training_args.do_eval else None ,tokenizer=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE =None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE =last_checkpoint
_SCREAMING_SNAKE_CASE =trainer.train(resume_from_checkpoint=a__)
trainer.save_model()
trainer.log_metrics('''train''' ,train_result.metrics)
trainer.save_metrics('''train''' ,train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
_SCREAMING_SNAKE_CASE =trainer.evaluate()
trainer.log_metrics('''eval''' ,a__)
trainer.save_metrics('''eval''' ,a__)
# Write model card and (optionally) push to hub
_SCREAMING_SNAKE_CASE ={
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__)
else:
trainer.create_model_card(**a__)
if __name__ == "__main__":
main() | 691 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = KandinskyImgaImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_SCREAMING_SNAKE_CASE =KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a ) | 691 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : str = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 691 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
        self.expected_encoded_image_num_channels = 3
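        # With four-channel (RGBA) inputs, do_convert_rgb is expected to reduce the encoded images to 3 channels.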
@property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , ) | 691 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
snake_case_ : List[Any] = 16
snake_case_ : Optional[Any] = 32
def lowerCamelCase( a__ ,a__ = 16 ,a__ = "bert-base-cased"):
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(a__)
_SCREAMING_SNAKE_CASE =load_dataset('''glue''' ,'''mrpc''')
def tokenize_function(a__):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE =tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=a__ ,max_length=a__)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_SCREAMING_SNAKE_CASE =datasets.map(
a__ ,batched=a__ ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,load_from_cache_file=a__)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE =tokenized_datasets.rename_column('''label''' ,'''labels''')
def collate_fn(a__):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(a__ ,padding='''max_length''' ,max_length=128 ,return_tensors='''pt''')
return tokenizer.pad(a__ ,padding='''longest''' ,return_tensors='''pt''')
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['''train'''] ,shuffle=a__ ,collate_fn=a__ ,batch_size=a__)
_SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['''validation'''] ,shuffle=a__ ,collate_fn=a__ ,batch_size=a__)
return train_dataloader, eval_dataloader
def training_function(config ,args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''])
    seed = int(config['''seed'''])
    batch_size = int(config['''batch_size'''])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator ,batch_size ,model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path ,return_dict=True)
    # Instantiate optimizer
    # If the DeepSpeed config already specifies an optimizer, pass DeepSpeed a DummyOptim
    # placeholder so that DeepSpeed can construct the real optimizer itself.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() ,lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    # Mirrors the optimizer logic: DummyScheduler defers to the scheduler in the DeepSpeed config.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=0 ,num_training_steps=max_training_steps ,)
    else:
        lr_scheduler = DummyScheduler(optimizer ,total_num_steps=max_training_steps ,warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('''glue''' ,'''mrpc''')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch ,num_epochs):
model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
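                # the optimizer and scheduler only step once every gradient_accumulation_steps batches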
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['''labels'''])) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
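                # drop the duplicated samples that distributed samplers append to pad out the final batch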
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions ,references=references ,)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" ,eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric['''accuracy''']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
        with open(os.path.join(args.output_dir ,'''all_results.json''') ,'''w''') as f:
            json.dump(performance_metric ,f)
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''' ,type=str ,default='''bert-base-cased''' ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=False ,)
    parser.add_argument(
        '''--output_dir''' ,type=str ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,)
    parser.add_argument(
        '''--performance_lower_bound''' ,type=float ,default=None ,help='''Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.''' ,)
    parser.add_argument(
        '''--num_epochs''' ,type=int ,default=3 ,help='''Number of train epochs.''' ,)
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config ,args)
if __name__ == "__main__":
main() | 691 |
def and_gate(input_a ,input_b):
    # the AND of two bits is 1 only when neither input is 0
    return int((input_a, input_b).count(0) == 0)
def test_and_gate():
    assert and_gate(0 ,0) == 0
    assert and_gate(0 ,1) == 0
    assert and_gate(1 ,0) == 0
    assert and_gate(1 ,1) == 1
if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1)) | 691 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : List[str] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case_ : Union[str, Any] = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
snake_case_ : List[Any] = {
'''gpt-neox-20b''': 20_48,
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        # Rebuild the serialized ByteLevel pre-tokenizer if its add_prefix_space setting
        # does not match what the caller asked for.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('''type''' ) )
            pre_tok_state['''add_prefix_space'''] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
def __UpperCamelCase ( self : Union[str, Any] , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
return input_ids | 691 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
class A__ ( UpperCamelCase__ ):
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        data_cached = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
        env_vars_to_replace = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
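        # The placeholders above get substituted into the released fine-tuning script so the test runs quickly.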
# Clean up bash script
        bash_script = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        cli_args = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(sys , '''argv''' , cli_args ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
        )
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        bash_script = bash_script.replace('''--fp16 ''' , ''' ''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('''--fp16''' , '''''' )
        epochs = 6
        cli_args = (
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
        with patch.object(sys , '''argv''' , cli_args ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
snake_case_ : List[str] = logging.get_logger(__name__)
snake_case_ : str = {
'''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "deta"
UpperCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : int , _a : Dict=None , _a : Optional[int]=900 , _a : Tuple=2048 , _a : Tuple=6 , _a : List[Any]=2048 , _a : int=8 , _a : List[Any]=6 , _a : List[str]=1024 , _a : str=8 , _a : Union[str, Any]=0.0 , _a : List[str]=True , _a : Optional[Any]="relu" , _a : Dict=256 , _a : int=0.1 , _a : Optional[int]=0.0 , _a : Tuple=0.0 , _a : Optional[int]=0.02 , _a : Dict=1.0 , _a : str=True , _a : Union[str, Any]=False , _a : Optional[int]="sine" , _a : int=5 , _a : int=4 , _a : Optional[Any]=4 , _a : List[Any]=True , _a : Optional[Any]=300 , _a : Dict=True , _a : Dict=True , _a : Union[str, Any]=1 , _a : Dict=5 , _a : Tuple=2 , _a : str=1 , _a : Optional[Any]=1 , _a : str=5 , _a : Optional[int]=2 , _a : Optional[Any]=0.1 , _a : Union[str, Any]=0.25 , **_a : Any , ) -> List[str]:
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
_SCREAMING_SNAKE_CASE =backbone_config
_SCREAMING_SNAKE_CASE =num_queries
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =d_model
_SCREAMING_SNAKE_CASE =encoder_ffn_dim
_SCREAMING_SNAKE_CASE =encoder_layers
_SCREAMING_SNAKE_CASE =encoder_attention_heads
_SCREAMING_SNAKE_CASE =decoder_ffn_dim
_SCREAMING_SNAKE_CASE =decoder_layers
_SCREAMING_SNAKE_CASE =decoder_attention_heads
_SCREAMING_SNAKE_CASE =dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =activation_dropout
_SCREAMING_SNAKE_CASE =activation_function
_SCREAMING_SNAKE_CASE =init_std
_SCREAMING_SNAKE_CASE =init_xavier_std
_SCREAMING_SNAKE_CASE =encoder_layerdrop
_SCREAMING_SNAKE_CASE =auxiliary_loss
_SCREAMING_SNAKE_CASE =position_embedding_type
# deformable attributes
_SCREAMING_SNAKE_CASE =num_feature_levels
_SCREAMING_SNAKE_CASE =encoder_n_points
_SCREAMING_SNAKE_CASE =decoder_n_points
_SCREAMING_SNAKE_CASE =two_stage
_SCREAMING_SNAKE_CASE =two_stage_num_proposals
_SCREAMING_SNAKE_CASE =with_box_refine
_SCREAMING_SNAKE_CASE =assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_SCREAMING_SNAKE_CASE =class_cost
_SCREAMING_SNAKE_CASE =bbox_cost
_SCREAMING_SNAKE_CASE =giou_cost
# Loss coefficients
_SCREAMING_SNAKE_CASE =mask_loss_coefficient
_SCREAMING_SNAKE_CASE =dice_loss_coefficient
_SCREAMING_SNAKE_CASE =bbox_loss_coefficient
_SCREAMING_SNAKE_CASE =giou_loss_coefficient
_SCREAMING_SNAKE_CASE =eos_coefficient
_SCREAMING_SNAKE_CASE =focal_alpha
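        # focal_alpha is the alpha weighting of the focal loss used for classification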
super().__init__(is_encoder_decoder=_a , **_a )
@property
    def num_attention_heads( self ) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
    def hidden_size( self ) -> int:
"""simple docstring"""
return self.d_model
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output | 691 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
        cmd = ['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 691 | 1 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
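# print() wrapped in an exclusive file lock so output from concurrent ranks does not interleave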
def printflock(*msgs):
    with open(__file__ ,'''r''') as fh:
        fcntl.flock(fh ,fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh ,fcntl.LOCK_UN)
local_rank = int(os.environ['''LOCAL_RANK'''])
torch.cuda.set_device(local_rank)
device = torch.device('''cuda''', local_rank)
hostname = socket.gethostname()
gpu = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise | 691 |
class Graph :
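    # Undirected weighted graph stored as a nested adjacency mapping; used together with the
    # union-find below to compute a minimum spanning tree with Borůvka's algorithm.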
    def __init__( self ):
        """Create an empty graph."""
        self.num_vertices = 0
        # num_edges is kept for parity with the original implementation; only num_vertices is used below
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        """Add a vertex to the graph if it is not already present."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        """Add an undirected, weighted edge between head and tail."""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        """Offset duplicate weights so every edge weight is distinct (Borůvka assumes unique weights)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        """Return one `head -> tail == weight` line per stored edge."""
        string = ''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip('''\n''' )
    def get_edges( self ):
        """Return all edges as (tail, head, weight) triples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        """Return the vertices of the graph."""
        return self.adjacency.keys()
@staticmethod
    def build( vertices=None , edges=None ):
        """Build a Graph from optional vertex and edge lists."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
class UnionFind :
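    # Disjoint-set forest with union by rank and path compression, used to track which
    # component each vertex belongs to while the spanning tree is built.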
    def __init__( self ):
        """Each item maps to its parent; roots map to themselves."""
        self.parent = {}
        self.rank = {}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
    def make_set( self , item ):
        """Create a singleton set for item if needed and return the item."""
        if item in self.parent:
            return self.find(item )
        self.parent[item] = item
        self.rank[item] = 0
        return item
    def find( self , item ):
        """Return the representative of item's set, compressing the path on the way."""
        if item not in self.parent:
            return self.make_set(item )
        if item != self.parent[item]:
            self.parent[item] = self.find(self.parent[item] )
        return self.parent[item]
    def union( self , item_a , item_b ):
        """Merge the sets containing item_a and item_b using union by rank."""
        root_a = self.find(item_a )
        root_b = self.find(item_b )
        if root_a == root_b:
            return root_a
        if self.rank[root_a] > self.rank[root_b]:
            self.parent[root_b] = root_a
            return root_a
        if self.rank[root_a] < self.rank[root_b]:
            self.parent[root_a] = root_b
            return root_b
        if self.rank[root_a] == self.rank[root_b]:
            self.rank[root_a] += 1
            self.parent[root_b] = root_a
            return root_a
        return None
@staticmethod
    def boruvka( graph ):
        """Compute a minimum spanning tree of graph using Borůvka's algorithm."""
        num_components = graph.num_vertices
        union_find = UnionFind()
        mst_edges = []
        while num_components > 1:
            # for every current component, track the cheapest edge leaving it
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head, tail, weight = edge
                set_a = union_find.find(head )
                set_b = union_find.find(tail )
                if set_a != set_b:
                    if cheap_edge[set_a] == -1 or cheap_edge[set_a][2] > weight:
                        cheap_edge[set_a] = [head, tail, weight]
                    if cheap_edge[set_b] == -1 or cheap_edge[set_b][2] > weight:
                        cheap_edge[set_b] = [head, tail, weight]
            # add each component's cheapest edge to the MST and merge the two components
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst | 691 | 1 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x): # picklable for multiprocessing
    return x.sum()
def add_one(i): # picklable for multiprocessing
    return i + 1
@dataclass
class A :
    x: int
    y: str
class A__ ( UpperCamelCase__ ):
    def test_map_nested( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =[1, 2]
_SCREAMING_SNAKE_CASE ={'''a''': 1, '''b''': 2}
_SCREAMING_SNAKE_CASE ={'''a''': [1, 2], '''b''': [3, 4]}
_SCREAMING_SNAKE_CASE ={'''a''': {'''1''': 1}, '''b''': 2}
_SCREAMING_SNAKE_CASE ={'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =[2, 3]
_SCREAMING_SNAKE_CASE ={'''a''': 2, '''b''': 3}
_SCREAMING_SNAKE_CASE ={'''a''': [2, 3], '''b''': [4, 5]}
_SCREAMING_SNAKE_CASE ={'''a''': {'''1''': 2}, '''b''': 3}
_SCREAMING_SNAKE_CASE ={'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(_a , _a ) , _a )
self.assertEqual(map_nested(_a , _a ) , _a )
self.assertEqual(map_nested(_a , _a ) , _a )
self.assertEqual(map_nested(_a , _a ) , _a )
self.assertEqual(map_nested(_a , _a ) , _a )
self.assertEqual(map_nested(_a , _a ) , _a )
self.assertEqual(map_nested(_a , _a ) , _a )
self.assertEqual(map_nested(_a , _a ) , _a )
_SCREAMING_SNAKE_CASE =2
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
self.assertEqual(map_nested(_a , _a , num_proc=_a ) , _a )
_SCREAMING_SNAKE_CASE ={'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
_SCREAMING_SNAKE_CASE ={'''a''': 2, '''b''': 0, '''c''': 2}
_SCREAMING_SNAKE_CASE ={
'''a''': np.eye(2 ).astype(_a ),
'''b''': np.zeros(3 ).astype(_a ),
'''c''': np.ones(2 ).astype(_a ),
}
self.assertEqual(map_nested(_a , _a , map_numpy=_a ) , _a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_a , _a , map_numpy=_a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(_a , _a , map_numpy=_a , num_proc=_a ) , _a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(_a , _a , map_numpy=_a , num_proc=_a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(_a ): # can't pickle a local lambda
            map_nested(lambda x : x + 1 , _a , num_proc=_a )
    def test_zip_dict( self ):
        """simple docstring"""
        dict_a = {'''a''': 1, '''b''': 2}
        dict_b = {'''a''': 3, '''b''': 4}
        dict_c = {'''a''': 5, '''b''': 6}
        expected_zip_dict_result = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(dict_a , dict_b , dict_c ) ) , expected_zip_dict_result )
    def test_temporary_assignment( self ):
        """simple docstring"""
        class Foo :
            my_attr = "bar"
        foo = Foo()
        self.assertEqual(foo.my_attr , '''bar''' )
        with temporary_assignment(foo , '''my_attr''' , '''BAR''' ):
            self.assertEqual(foo.my_attr , '''BAR''' )
        self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' ,[
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] ,)
def test_map_nested_num_proc( iterable_length ,num_proc ,expected_num_proc):
    with patch('''datasets.utils.py_utils._single_map_nested''') as mock_single_map_nested, patch(
        '''datasets.parallel.parallel.Pool''') as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10 ,data_struct ,num_proc=num_proc ,parallel_min_length=16)
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
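# map_nested only fans out to a process pool once the input has at least parallel_min_length
# items; the smaller cases above therefore stay single-process (expected_num_proc == 1).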
class A__ ( UpperCamelCase__ ):
@require_tf
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
import tensorflow as tf
from tensorflow.keras import layers
        model = layers.Dense(2 )
        def gen_random_output():
            inputs = tf.random.uniform((1, 3) )
            return model(inputs ).numpy()
        with temp_seed(42 , set_tensorflow=True ):
            out_a = gen_random_output()
        with temp_seed(42 , set_tensorflow=True ):
            out_b = gen_random_output()
        out_c = gen_random_output()
        # the same seed must reproduce the output, while an unseeded draw must differ
        np.testing.assert_equal(out_a , out_b )
        self.assertGreater(np.abs(out_a - out_c ).sum() , 0 )
@require_torch
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
import torch
        def gen_random_output():
            model = torch.nn.Linear(3 , 2 )
            x = torch.rand(1 , 3 )
            return model(x ).detach().numpy()
        with temp_seed(42 , set_pytorch=True ):
            out_a = gen_random_output()
        with temp_seed(42 , set_pytorch=True ):
            out_b = gen_random_output()
        out_c = gen_random_output()
        np.testing.assert_equal(out_a , out_b )
        self.assertGreater(np.abs(out_a - out_c ).sum() , 0 )
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
def gen_random_output():
return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            out_a = gen_random_output()
        with temp_seed(42 ):
            out_b = gen_random_output()
        out_c = gen_random_output()
        np.testing.assert_equal(out_a , out_b )
        self.assertGreater(np.abs(out_a - out_c ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' ,[{}])
def test_nested_data_structure_data( input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' ,[
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] ,)
def test_flatten( data ,expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input_ = A(x=1 ,y='''foobar''')
    expected_output = {'''x''': 1, '''y''': '''foobar'''}
    assert asdict(input_) == expected_output
    input_ = {'''a''': {'''b''': A(x=10 ,y='''foo''')}, '''c''': [A(x=20 ,y='''bar''')]}
    expected_output = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
    assert asdict(input_) == expected_output
    with pytest.raises(TypeError):
        asdict([1, A(x=10 ,y='''foo''')])
def _split_text(text):
    return text.split()
def _aseconds_generator_of_aitems_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{'''text''': '''hello there'''}] * 10))
assert out.count('''hello''') == 10
assert out.count('''there''') == 10
    assert len(out) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool ,_split_text ,kwargs_iterable=[{'''text''': '''hello there'''}] * 10))
assert out.count('''hello''') == 10
assert out.count('''there''') == 10
    assert len(out) == 20
# check that we get items as fast as possible
with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}]):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
assert out.count('''a''') == 2
assert out.count('''b''') == 2
    assert len(out) == 4 | 691 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def simple_accuracy(preds ,labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class DataTrainingArguments :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=num_labels ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=config ,cache_dir=model_args.cache_dir ,)
# Get datasets
    train_dataset = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
    eval_dataset = (
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions ,axis=1)
        return {"acc": simple_accuracy(preds ,p.label_ids)}
    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer ,pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model ,args=training_args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,compute_metrics=compute_metrics ,data_collator=data_collator ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir ,'''eval_results.txt''')
        if trainer.is_world_master():
            with open(output_eval_file ,'''w''') as writer:
                logger.info('''***** Eval results *****''')
                for key, value in result.items():
                    logger.info(''' %s = %s''' ,key ,value)
                    writer.write('''%s = %s\n''' % (key, value))
            results.update(result)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 691 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : Optional[int] = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "wav2vec2"
def __init__( self : Union[str, Any] , _a : Tuple=32 , _a : int=768 , _a : Optional[int]=12 , _a : Union[str, Any]=12 , _a : Union[str, Any]=3072 , _a : Dict="gelu" , _a : Dict=0.1 , _a : Tuple=0.1 , _a : Tuple=0.1 , _a : Dict=0.0 , _a : int=0.0 , _a : Dict=0.1 , _a : List[str]=0.1 , _a : str=0.02 , _a : Optional[int]=1E-5 , _a : Union[str, Any]="group" , _a : str="gelu" , _a : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , _a : Dict=(5, 2, 2, 2, 2, 2, 2) , _a : int=(10, 3, 3, 3, 3, 2, 2) , _a : Any=False , _a : Optional[int]=128 , _a : str=16 , _a : List[str]=False , _a : Tuple=True , _a : Dict=0.05 , _a : Any=10 , _a : str=2 , _a : List[Any]=0.0 , _a : Dict=10 , _a : List[str]=0 , _a : List[str]=320 , _a : Optional[int]=2 , _a : Optional[Any]=0.1 , _a : str=100 , _a : Any=256 , _a : Optional[Any]=256 , _a : Optional[Any]=0.1 , _a : Optional[Any]="sum" , _a : Tuple=False , _a : Tuple=False , _a : Optional[Any]=256 , _a : Tuple=(512, 512, 512, 512, 1500) , _a : List[str]=(5, 3, 3, 1, 1) , _a : int=(1, 2, 3, 1, 1) , _a : Optional[int]=512 , _a : str=0 , _a : Tuple=1 , _a : Any=2 , _a : Union[str, Any]=False , _a : Optional[Any]=3 , _a : Union[str, Any]=2 , _a : Tuple=3 , _a : List[str]=None , _a : Dict=None , **_a : List[Any] , ) -> Dict:
"""simple docstring"""
super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =feat_extract_norm
_SCREAMING_SNAKE_CASE =feat_extract_activation
_SCREAMING_SNAKE_CASE =list(_a )
_SCREAMING_SNAKE_CASE =list(_a )
_SCREAMING_SNAKE_CASE =list(_a )
_SCREAMING_SNAKE_CASE =conv_bias
_SCREAMING_SNAKE_CASE =num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE =num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE =len(self.conv_dim )
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =hidden_dropout
_SCREAMING_SNAKE_CASE =attention_dropout
_SCREAMING_SNAKE_CASE =activation_dropout
_SCREAMING_SNAKE_CASE =feat_proj_dropout
_SCREAMING_SNAKE_CASE =final_dropout
_SCREAMING_SNAKE_CASE =layerdrop
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =do_stable_layer_norm
_SCREAMING_SNAKE_CASE =use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE =apply_spec_augment
_SCREAMING_SNAKE_CASE =mask_time_prob
_SCREAMING_SNAKE_CASE =mask_time_length
_SCREAMING_SNAKE_CASE =mask_time_min_masks
_SCREAMING_SNAKE_CASE =mask_feature_prob
_SCREAMING_SNAKE_CASE =mask_feature_length
_SCREAMING_SNAKE_CASE =mask_feature_min_masks
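        # SpecAugment time/feature masking configured above is only applied during training,
        # and only when apply_spec_augment is True.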
# parameters for pretraining with codevector quantized representations
_SCREAMING_SNAKE_CASE =num_codevectors_per_group
_SCREAMING_SNAKE_CASE =num_codevector_groups
_SCREAMING_SNAKE_CASE =contrastive_logits_temperature
_SCREAMING_SNAKE_CASE =feat_quantizer_dropout
_SCREAMING_SNAKE_CASE =num_negatives
_SCREAMING_SNAKE_CASE =codevector_dim
_SCREAMING_SNAKE_CASE =proj_codevector_dim
_SCREAMING_SNAKE_CASE =diversity_loss_weight
# ctc loss
_SCREAMING_SNAKE_CASE =ctc_loss_reduction
_SCREAMING_SNAKE_CASE =ctc_zero_infinity
# adapter
_SCREAMING_SNAKE_CASE =add_adapter
_SCREAMING_SNAKE_CASE =adapter_kernel_size
_SCREAMING_SNAKE_CASE =adapter_stride
_SCREAMING_SNAKE_CASE =num_adapter_layers
_SCREAMING_SNAKE_CASE =output_hidden_size or hidden_size
_SCREAMING_SNAKE_CASE =adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_SCREAMING_SNAKE_CASE =list(_a )
_SCREAMING_SNAKE_CASE =list(_a )
_SCREAMING_SNAKE_CASE =list(_a )
_SCREAMING_SNAKE_CASE =xvector_output_dim
@property
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 691 |
def binary_exponentiation(a ,n ,mod):
    # computes (a ** n) % mod with O(log n) multiplications by halving the exponent
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a ,n - 1 ,mod) * a) % mod
    else:
        b = binary_exponentiation(a ,n // 2 ,mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# By Fermat's little theorem, b ** (p - 2) is the modular inverse of b mod p, so dividing
# by b (here b divides a exactly) matches multiplying by that inverse.
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p) | 691 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
snake_case_ : str = None
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
snake_case_ : Optional[int] = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
snake_case_ : Any = {
'''t5-small''': 5_12,
'''t5-base''': 5_12,
'''t5-large''': 5_12,
'''t5-3b''': 5_12,
'''t5-11b''': 5_12,
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = TaTokenizer
UpperCAmelCase = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        """simple docstring"""
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x : bool('''extra_id_''' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
                    ''' tokens''' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
@staticmethod
def __UpperCamelCase ( _a : Union[str, Any] , _a : Union[str, Any] , _a : Optional[int] ) -> Dict:
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
_SCREAMING_SNAKE_CASE =TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f" {pretrained_model_name_or_path} automatically truncating your input to"
f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _a , )
return max_model_length
def __UpperCamelCase ( self : List[str] , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE =os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
logger.info(f"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def __UpperCamelCase ( self : int , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
_SCREAMING_SNAKE_CASE =token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __UpperCamelCase ( self : Any , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def get_sentinel_tokens( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        return list(
            set(filter(lambda _a : bool(re.search(R'''<extra_id_\d+>''' , _a ) ) , self.additional_special_tokens ) ) )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
return [self.convert_tokens_to_ids(_a ) for token in self.get_sentinel_tokens()] | 691 |
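# Standalone sketch (added for illustration): get_sentinel_tokens above keeps
# only the additional special tokens matching the <extra_id_N> pattern. The
# token list below is invented, not loaded from a real tokenizer.
import re
demo_tokens = ['''<extra_id_0>''', '''<extra_id_1>''', '''<pad>''', '''</s>''']
print([t for t in demo_tokens if re.search(R'''<extra_id_\d+>''' , t )])
# ['<extra_id_0>', '<extra_id_1>']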
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
    def default_image_processor( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self ) | 691 | 1 |
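# Minimal sketch (illustrative, not from the test file above): the weight
# initialization check in the tests asserts that normalization layers start
# from unit weight and zero bias, which freshly constructed torch norm layers
# satisfy out of the box.
import torch
from torch import nn
norm = nn.GroupNorm(num_groups=2 , num_channels=4 )
assert torch.all(norm.weight == 1 ) and torch.all(norm.bias == 0 )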
from typing import Any
class Node :
    def __init__( self : Tuple , data : Any ) -> None:
        """simple docstring"""
        self.data = data
        self.next = None
class LinkedList :
    def __init__( self : Dict ) -> None:
        """simple docstring"""
        self.head = None
    def print_list( self : List[Any] ) -> None:
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data , end=''' ''' )
            temp = temp.next
        print()
    def push( self : List[str] , new_data : Any ) -> None:
        """simple docstring"""
        new_node = Node(new_data )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self : Union[str, Any] , node_data_a : Any , node_data_b : Any ) -> None:
        """simple docstring"""
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data , node_b.data = node_b.data , node_a.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
ll.print_list() | 691 |
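# Illustrative helper (hypothetical, not part of the original file): collect
# the payloads into a Python list so the effect of swap_nodes can be checked
# programmatically rather than only by printing.
def to_pylist(linked_list):
    out, node = [], linked_list.head
    while node is not None:
        out.append(node.data)
        node = node.next
    return out
if __name__ == "__main__":
    print(to_pylist(ll))  # [4, 2, 3, 1, 5] after swapping 1 and 4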
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
            Retrieval batch size, defined as the number of queries issued concurrently to the faiss index
            encapsulated by [`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
        dataset_split (`str`, *optional*, defaults to `"train"`):
            Which split of the `dataset` to load.
        index_name (`str`, *optional*, defaults to `"compressed"`):
            The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
            `"compressed"`.
        index_path (`str`, *optional*):
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
        use_dummy_dataset (`bool`, *optional*, defaults to `False`):
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output | 691 | 1 |
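# Hedged usage sketch (added, not in the original file): composing a RagConfig
# from two sub-configs via the classmethod defined above. DPRConfig and
# BartConfig are the usual choices for RAG; any question-encoder/generator
# config pair works. Requires the `transformers` package at runtime.
from transformers import BartConfig, DPRConfig, RagConfig
rag_config = RagConfig.from_question_encoder_generator_configs(
    DPRConfig() , BartConfig() , n_docs=5 )
print(rag_config.n_docs )  # 5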
import math
def malus_law(initial_intensity , angle):
    # reject negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
    # reject angles outside the allowed 0-360 degree range
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''') | 691 |
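# Quick usage sketch (added for illustration): at 60 degrees, Malus's law
# predicts a quarter of the initial intensity, since cos(60 degrees) ** 2 = 0.25.
print(malus_law(100.0 , 60 ))  # ~25.0, up to floating-point rounding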
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
self.wait() | 691 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline | 691 |
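# Generic sketch of the optional-dependency pattern used above (illustrative,
# not part of the diffusers module): probe for a package and fall back to a
# placeholder when it is missing, instead of failing at import time.
import importlib.util
def is_available(package_name):
    return importlib.util.find_spec(package_name ) is not None
if is_available('''numpy''' ):
    import numpy as np  # real implementation
else:
    np = None  # placeholder standing in for the dummy objects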
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
    def mask_token( self : Optional[Any] , value : str ) -> None:
        """simple docstring"""
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as this is done inside Blenderbot itself
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
return input_ids | 691 | 1 |
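# Standalone sketch (illustrative, all names hypothetical): the conversation
# builder above prefixes user turns with a space, joins the turns, and keeps
# only the last model_max_length tokens. Whitespace splitting stands in for
# the real subword tokenizer here.
def build_conversation_text(turns , model_max_length=128 ):
    parts = [(''' ''' + text) if is_user else text for is_user, text in turns]
    tokens = ''' '''.join(parts ).split()  # stand-in for real tokenization
    return tokens[-model_max_length:]
print(build_conversation_text([(True, '''Hello there!'''), (False, '''Hi!''')] ))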
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
snake_case_ : str = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
snake_case_ : Any = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0 ,1)
    images = images.cpu().permute(0 ,2 ,3 ,1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype('''uint8''')
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() ,mode='''L''') for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images | 691 |
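# Quick usage sketch (added for illustration): numpy_to_pil accepts HWC float
# arrays in [0, 1], batched or single, and returns a list of PIL images.
import numpy as np
demo = np.random.rand(2 , 8 , 8 , 3 )  # two 8x8 RGB images
print(numpy_to_pil(demo )[0].size )  # (8, 8)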
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
    def get_tokenizer( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
    def get_rust_tokenizer( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
    def get_image_processor( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 691 | 1 |
import os
import pytest
from attr import dataclass
snake_case_ : Any = '''us-east-1''' # defaults region
@dataclass
class A__ :
UpperCAmelCase = 42
UpperCAmelCase = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
UpperCAmelCase = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
UpperCAmelCase = {**hyperparameters, "max_steps": 1000}
@property
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
@property
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
        return f"{self.framework}-transformers-test"
@property
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''')
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework) | 691 |
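# Quick sketch (added for illustration): how SageMaker would apply one of the
# metric regexes defined above to a training log line. The log line is invented.
import re
log_line = '''train_runtime = 12.34'''
match = re.search(r"train_runtime.*=\D*(.*?)$" , log_line )
print(match.group(1 ))  # '12.34'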
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir , src_lang , tgt_lang , model_name):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 691 | 1 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
snake_case_ : int = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
snake_case_ : Any = '''sshleifer/student_marian_en_ro_6_1'''
snake_case_ : Dict = '''sshleifer/tiny-mbart'''
@require_torch
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any , _a : List[Any]=False , _a : List[str]=None , _a : Optional[int]=True , _a : str=True , _a : Optional[int]=True , _a : Optional[int]=True , ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_a , num_train_epochs=1 , distributed=_a , extra_args_str=_a , predict_with_generate=_a , do_train=_a , do_eval=_a , do_predict=_a , )
_SCREAMING_SNAKE_CASE =TrainerState.load_from_json(os.path.join(_a , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
_SCREAMING_SNAKE_CASE =[log for log in logs if '''eval_loss''' in log.keys()]
_SCREAMING_SNAKE_CASE =eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
_SCREAMING_SNAKE_CASE =eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , _a )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a )
@require_torch_multi_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=_a )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.run_seqaseq_quick(
distributed=_a , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=_a )
@require_apex
@require_torch_gpu
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test a 2nd time - was getting 'eval_loss': nan
        # to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_a , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def __UpperCamelCase ( self : str , _a : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
_SCREAMING_SNAKE_CASE =experiments[experiment_id]
_SCREAMING_SNAKE_CASE ={'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
_SCREAMING_SNAKE_CASE ='''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_a , extra_args_str=data['''extra_args_str'''] )
_SCREAMING_SNAKE_CASE =len(re.findall(_a , cl.err ) )
self.assertEqual(_a , data['''n_matches'''] )
@slow
def __UpperCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_a , learning_rate=3E-4 , num_train_epochs=10 , distributed=_a , )
# Check metrics
_SCREAMING_SNAKE_CASE =TrainerState.load_from_json(os.path.join(_a , '''trainer_state.json''' ) ).log_history
_SCREAMING_SNAKE_CASE =[log for log in logs if '''eval_loss''' in log.keys()]
_SCREAMING_SNAKE_CASE =eval_metrics[0]
_SCREAMING_SNAKE_CASE =eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , _a )
# test if do_predict saves generations and metrics
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_a : str ) -> Tuple[int, float]:
_SCREAMING_SNAKE_CASE ='''--skip_memory_metrics 0'''
_SCREAMING_SNAKE_CASE =self.run_trainer(
max_len=128 , model_name=_a , learning_rate=3E-4 , num_train_epochs=1 , optim=_a , distributed=_a , extra_args_str=_a , do_eval=_a , do_predict=_a , n_gpus_to_use=1 , )
# Check metrics
_SCREAMING_SNAKE_CASE =TrainerState.load_from_json(Path(_a , '''trainer_state.json''' ) ).log_history
_SCREAMING_SNAKE_CASE =int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
_SCREAMING_SNAKE_CASE =int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
_SCREAMING_SNAKE_CASE =logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
_SCREAMING_SNAKE_CASE =gpu_alloc_mem_orig - gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE =gpu_peak_mem_orig + gpu_alloc_mem_orig
_SCREAMING_SNAKE_CASE =gpu_peak_mem_bnb + gpu_alloc_mem_bnb
_SCREAMING_SNAKE_CASE =gpu_total_mem_orig - gpu_total_mem_bnb
            # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which sit in `nn.Embedding` and
            # don't get quantized, remaining in fp32. Therefore we only have 25M parameters quantized
            # in 2 bytes and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
        # that we have at least 120MB of savings
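        # A quick worked check of the arithmetic above (comment-only sketch; the 25M
        # figure comes from the derivation a few lines up):
        #   expected_saving_mb = 25 * (8 - 2)  # = 150MB, so 120MB is a safe floor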
_SCREAMING_SNAKE_CASE =120
        # uncomment the following if this test starts failing - requires Python 3.8 for the f-string `=` debug feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_a , _a , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
_a , _a , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
_a , _a , f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def __UpperCamelCase ( self : Optional[int] , _a : int , _a : str , _a : int , _a : float = 3E-3 , _a : str = "adafactor" , _a : bool = False , _a : str = None , _a : int = 0 , _a : bool = True , _a : bool = True , _a : bool = True , _a : bool = True , _a : int = None , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =f"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_a )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_a )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
_SCREAMING_SNAKE_CASE =f"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_a )}\n ".split()
_SCREAMING_SNAKE_CASE ='''
--do_predict
'''.split()
_SCREAMING_SNAKE_CASE =[]
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
_SCREAMING_SNAKE_CASE =get_gpu_count()
_SCREAMING_SNAKE_CASE =get_torch_dist_unique_port()
_SCREAMING_SNAKE_CASE =f"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
_SCREAMING_SNAKE_CASE =[sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_a , env=self.get_env() )
else:
_SCREAMING_SNAKE_CASE =['''run_translation.py'''] + args
with patch.object(_a , '''argv''' , _a ):
main()
        return output_dir

| 691 |
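
# A minimal, self-contained sketch of the counting pattern the log-level test above
# relies on: grep-style counting of a marker string in captured stderr. The helper
# name and sample text here are illustrative, not from the test harness.
import re

def count_marker(marker: str, captured_stderr: str) -> int:
    # one match per process that logged the marker at an enabled verbosity level
    return len(re.findall(marker, captured_stderr))

assert count_marker("Running training", "Running training\nRunning training\n") == 2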
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)

| 691 | 1 |
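
# A hedged sketch of the lazy-import behavior configured above, written with plain
# PEP 562 module-level __getattr__ instead of the real `_LazyModule` (whose internals
# are not shown here). Assumes this file lives inside a package:
import importlib

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

def __getattr__(name):
    # resolve the attribute lazily on first access, importing only the needed submodule
    for submodule, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")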
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =prime_factors(a__)
if is_square_free(a__):
return -1 if len(a__) % 2 else 1
return 0
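
# A self-contained reference sketch of the Mobius function implemented above
# (helper logic inlined here; names are illustrative, not the module's imports):
def _mobius_reference(n: int) -> int:
    # mu(n) = 0 if n has a squared prime factor, else (-1)**(count of prime factors)
    factors = []
    divisor, remaining = 2, n
    while divisor * divisor <= remaining:
        while remaining % divisor == 0:
            factors.append(divisor)
            remaining //= divisor
        divisor += 1
    if remaining > 1:
        factors.append(remaining)
    if len(set(factors)) != len(factors):  # a repeated factor means not square-free
        return 0
    return -1 if len(factors) % 2 else 1

assert [_mobius_reference(k) for k in (1, 2, 6, 12, 30)] == [1, -1, 1, 0, -1]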
if __name__ == "__main__":
import doctest
    doctest.testmod()

| 691 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase( a__):
def wrapper(*a__ ,**a__):
_SCREAMING_SNAKE_CASE =timeit.default_timer()
_SCREAMING_SNAKE_CASE =func(*a__ ,**a__)
_SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime
return delta
_SCREAMING_SNAKE_CASE =func.__name__
return wrapper
def lowerCamelCase( a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
    return dataset

| 691 | 1 |
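
# A hedged usage sketch for the benchmark helpers above, kept as comments; it assumes
# the last helper were exported as `generate_example_dataset(path, features,
# num_examples, seq_shapes)` (hypothetical name for this illustration):
#
#   import os, tempfile
#   import datasets
#   features = datasets.Features({"text": datasets.Value("string")})
#   with tempfile.TemporaryDirectory() as tmp:
#       dataset = generate_example_dataset(
#           os.path.join(tmp, "bench.arrow"), features, num_examples=10)
#       assert len(dataset) == 10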
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
snake_case_ : Optional[int] = logging.getLogger(__name__)
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
UpperCAmelCase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class A__ :
UpperCAmelCase = field(default=UpperCamelCase__ , metadata={"help": "The input training data file (a text file)."} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
if self.train_file is not None:
_SCREAMING_SNAKE_CASE =self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_SCREAMING_SNAKE_CASE =self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class A__ :
UpperCAmelCase = 42
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
def __call__( self : Optional[Any] , _a : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''label''' if '''label''' in features[0].keys() else '''labels'''
_SCREAMING_SNAKE_CASE =[feature.pop(_a ) for feature in features]
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =len(features[0]['''input_ids'''] )
_SCREAMING_SNAKE_CASE =[
[{k: v[i] for k, v in feature.items()} for i in range(_a )] for feature in features
]
_SCREAMING_SNAKE_CASE =list(chain(*_a ) )
_SCREAMING_SNAKE_CASE =self.tokenizer.pad(
_a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
_SCREAMING_SNAKE_CASE ={k: v.view(_a , _a , -1 ) for k, v in batch.items()}
# Add back labels
_SCREAMING_SNAKE_CASE =torch.tensor(_a , dtype=torch.intaa )
return batch
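
# A toy, tokenizer-free illustration of the flatten -> pad -> un-flatten shape handling
# in the collator above (values are illustrative only):
_toy_features = [{"input_ids": [[1, 2], [3], [4, 5], [6]]}]  # 1 example, 4 choices
_flattened = [choice for feature in _toy_features for choice in feature["input_ids"]]
assert len(_flattened) == len(_toy_features) * 4  # flattened so padding sees one batch
_regrouped = [_flattened[i : i + 4] for i in range(0, len(_flattened), 4)]
assert len(_regrouped) == len(_toy_features)  # back to (batch_size, num_choices, ...)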
def lowerCamelCase( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' ,a__ ,a__)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,handlers=[logging.StreamHandler(sys.stdout)] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE =training_args.get_process_log_level()
logger.setLevel(a__)
datasets.utils.logging.set_verbosity(a__)
transformers.utils.logging.set_verbosity(a__)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}")
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE =None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE =get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''')
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_SCREAMING_SNAKE_CASE ={}
if data_args.train_file is not None:
_SCREAMING_SNAKE_CASE =data_args.train_file
if data_args.validation_file is not None:
_SCREAMING_SNAKE_CASE =data_args.validation_file
_SCREAMING_SNAKE_CASE =data_args.train_file.split('''.''')[-1]
_SCREAMING_SNAKE_CASE =load_dataset(
a__ ,data_files=a__ ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
# Downloading and loading the swag dataset from the hub.
_SCREAMING_SNAKE_CASE =load_dataset(
'''swag''' ,'''regular''' ,cache_dir=model_args.cache_dir ,use_auth_token=True if model_args.use_auth_token else None ,)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,use_fast=model_args.use_fast_tokenizer ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_SCREAMING_SNAKE_CASE =[f"ending{i}" for i in range(4)]
_SCREAMING_SNAKE_CASE ='''sent1'''
_SCREAMING_SNAKE_CASE ='''sent2'''
if data_args.max_seq_length is None:
_SCREAMING_SNAKE_CASE =tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''')
_SCREAMING_SNAKE_CASE =1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.")
_SCREAMING_SNAKE_CASE =min(data_args.max_seq_length ,tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(a__):
_SCREAMING_SNAKE_CASE =[[context] * 4 for context in examples[context_name]]
_SCREAMING_SNAKE_CASE =examples[question_header_name]
_SCREAMING_SNAKE_CASE =[
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(a__)
]
# Flatten out
_SCREAMING_SNAKE_CASE =list(chain(*a__))
_SCREAMING_SNAKE_CASE =list(chain(*a__))
# Tokenize
_SCREAMING_SNAKE_CASE =tokenizer(
a__ ,a__ ,truncation=a__ ,max_length=a__ ,padding='''max_length''' if data_args.pad_to_max_length else False ,)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 ,len(a__) ,4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''')
_SCREAMING_SNAKE_CASE =raw_datasets['''train''']
if data_args.max_train_samples is not None:
_SCREAMING_SNAKE_CASE =min(len(a__) ,data_args.max_train_samples)
_SCREAMING_SNAKE_CASE =train_dataset.select(range(a__))
with training_args.main_process_first(desc='''train dataset map pre-processing'''):
_SCREAMING_SNAKE_CASE =train_dataset.map(
a__ ,batched=a__ ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''')
_SCREAMING_SNAKE_CASE =raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
_SCREAMING_SNAKE_CASE =min(len(a__) ,data_args.max_eval_samples)
_SCREAMING_SNAKE_CASE =eval_dataset.select(range(a__))
with training_args.main_process_first(desc='''validation dataset map pre-processing'''):
_SCREAMING_SNAKE_CASE =eval_dataset.map(
a__ ,batched=a__ ,num_proc=data_args.preprocessing_num_workers ,load_from_cache_file=not data_args.overwrite_cache ,)
# Data collator
_SCREAMING_SNAKE_CASE =(
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=a__ ,pad_to_multiple_of=8 if training_args.fpaa else None)
)
# Metric
def compute_metrics(a__):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =eval_predictions
_SCREAMING_SNAKE_CASE =np.argmax(a__ ,axis=1)
return {"accuracy": (preds == label_ids).astype(np.floataa).mean().item()}
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=train_dataset if training_args.do_train else None ,eval_dataset=eval_dataset if training_args.do_eval else None ,tokenizer=a__ ,data_collator=a__ ,compute_metrics=a__ ,)
# Training
if training_args.do_train:
_SCREAMING_SNAKE_CASE =None
if training_args.resume_from_checkpoint is not None:
_SCREAMING_SNAKE_CASE =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_SCREAMING_SNAKE_CASE =last_checkpoint
_SCREAMING_SNAKE_CASE =trainer.train(resume_from_checkpoint=a__)
trainer.save_model() # Saves the tokenizer too for easy upload
_SCREAMING_SNAKE_CASE =train_result.metrics
_SCREAMING_SNAKE_CASE =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(a__)
)
_SCREAMING_SNAKE_CASE =min(a__ ,len(a__))
trainer.log_metrics('''train''' ,a__)
trainer.save_metrics('''train''' ,a__)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(a__)
_SCREAMING_SNAKE_CASE =min(a__ ,len(a__))
trainer.log_metrics('''eval''' ,a__)
trainer.save_metrics('''eval''' ,a__)
_SCREAMING_SNAKE_CASE ={
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**a__)
else:
trainer.create_model_card(**a__)
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()

| 691 |
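
# A self-contained toy version of the multiple-choice expansion performed by
# preprocess_function above: each context is repeated once per candidate ending,
# flattened for tokenization, then regrouped in blocks of 4 (data is illustrative):
contexts = ["She opened the door"]
endings = [["and left.", "and sang.", "and slept.", "and ran."]]
first_sentences = [[context] * 4 for context in contexts]
flat_firsts = [sentence for sentences in first_sentences for sentence in sentences]
flat_seconds = [ending for ends in endings for ending in ends]
pairs = list(zip(flat_firsts, flat_seconds))
assert len(pairs) == 4 and all(first == contexts[0] for first, _ in pairs)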
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
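
# A distilled, framework-free sketch of the patience logic in the forward pass above:
# inference stops once `patience` consecutive per-layer predictions agree.
def _pabee_exit_layer(layer_predictions, patience):
    """Return the 1-based layer index at which PABEE-style inference would stop."""
    streak, previous = 0, None
    for depth, prediction in enumerate(layer_predictions, start=1):
        # the counter grows only while the current prediction matches the previous one
        streak = streak + 1 if prediction == previous else 0
        previous = prediction
        if streak == patience:
            return depth
    return len(layer_predictions)

assert _pabee_exit_layer([0, 1, 1, 1, 2], patience=2) == 4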
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
        return outputs

| 691 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case_ : Tuple = random.Random()
if is_torch_available():
import torch
def lowerCamelCase( a__ ,a__=1.0 ,a__=None ,a__=None):
if rng is None:
_SCREAMING_SNAKE_CASE =global_rng
_SCREAMING_SNAKE_CASE =[]
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class A__ ( unittest.TestCase ):
def __init__( self : str , _a : List[str] , _a : List[Any]=7 , _a : str=400 , _a : Union[str, Any]=2000 , _a : Union[str, Any]=1 , _a : int=0.0 , _a : Any=1_6000 , _a : Tuple=True , _a : str=True , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =min_seq_length
_SCREAMING_SNAKE_CASE =max_seq_length
_SCREAMING_SNAKE_CASE =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_SCREAMING_SNAKE_CASE =feature_size
_SCREAMING_SNAKE_CASE =padding_value
_SCREAMING_SNAKE_CASE =sampling_rate
_SCREAMING_SNAKE_CASE =return_attention_mask
_SCREAMING_SNAKE_CASE =do_normalize
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCamelCase ( self : str , _a : Dict=False , _a : Optional[Any]=False ) -> Tuple:
"""simple docstring"""
def _flatten(_a : Dict ):
return list(itertools.chain(*_a ) )
if equal_length:
_SCREAMING_SNAKE_CASE =floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_SCREAMING_SNAKE_CASE =[
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_SCREAMING_SNAKE_CASE =[np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ASTFeatureExtractor
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ASTFeatureExtractionTester(self )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
_SCREAMING_SNAKE_CASE =[np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
_SCREAMING_SNAKE_CASE =feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
_SCREAMING_SNAKE_CASE =feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test batched
_SCREAMING_SNAKE_CASE =feat_extract(_a , padding=_a , return_tensors='''np''' ).input_values
_SCREAMING_SNAKE_CASE =feat_extract(_a , padding=_a , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_SCREAMING_SNAKE_CASE =[floats_list((1, x) )[0] for x in (800, 800, 800)]
_SCREAMING_SNAKE_CASE =np.asarray(_a )
_SCREAMING_SNAKE_CASE =feat_extract(_a , return_tensors='''np''' ).input_values
_SCREAMING_SNAKE_CASE =feat_extract(_a , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(_a , _a ):
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_SCREAMING_SNAKE_CASE =np.random.rand(100 ).astype(np.floataa )
_SCREAMING_SNAKE_CASE =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_SCREAMING_SNAKE_CASE =feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_SCREAMING_SNAKE_CASE =feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __UpperCamelCase ( self : List[str] , _a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
from datasets import load_dataset
_SCREAMING_SNAKE_CASE =load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
_SCREAMING_SNAKE_CASE =ds.sort('''id''' ).select(range(_a ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
@require_torch
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
        # fmt: off
        _SCREAMING_SNAKE_CASE =torch.tensor(
[-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76,
-1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33,
-1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36,
-0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
# fmt: on
_SCREAMING_SNAKE_CASE =self._load_datasamples(1 )
_SCREAMING_SNAKE_CASE =ASTFeatureExtractor()
_SCREAMING_SNAKE_CASE =feature_extractor(_a , return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , _a , atol=1E-4 ) )

| 691 |
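
# A hedged sketch of the equivalence property exercised by the tests above: a feature
# extractor should produce matching features for list and numpy inputs. Kept as
# comments to avoid a hard transformers dependency in this sketch:
#
#   import numpy as np
#   from transformers import ASTFeatureExtractor
#   fe = ASTFeatureExtractor()
#   raw = np.random.rand(16000).astype(np.float32)
#   a = fe(raw, sampling_rate=16000, return_tensors="np").input_values
#   b = fe(raw.tolist(), sampling_rate=16000, return_tensors="np").input_values
#   assert np.allclose(a, b, atol=1e-3)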
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : str = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)

| 691 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
snake_case_ : Tuple = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "summarization"
UpperCAmelCase = ["loss"]
UpperCAmelCase = ROUGE_KEYS
UpperCAmelCase = "rouge2"
def __init__( self : List[str] , _a : Any , **_a : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if hparams.sortish_sampler and hparams.gpus > 1:
_SCREAMING_SNAKE_CASE =False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' )
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' )
super().__init__(_a , num_labels=_a , mode=self.mode , **_a )
use_task_specific_params(self.model , '''summarization''' )
save_git_info(self.hparams.output_dir )
_SCREAMING_SNAKE_CASE =Path(self.output_dir ) / '''metrics.json'''
_SCREAMING_SNAKE_CASE =Path(self.output_dir ) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path )
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =defaultdict(_a )
_SCREAMING_SNAKE_CASE =self.config.model_type
_SCREAMING_SNAKE_CASE =self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
_SCREAMING_SNAKE_CASE ={
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_SCREAMING_SNAKE_CASE ={
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
_SCREAMING_SNAKE_CASE ={k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_SCREAMING_SNAKE_CASE ={
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_SCREAMING_SNAKE_CASE =get_git_info()['''repo_sha''']
_SCREAMING_SNAKE_CASE =hparams.num_workers
_SCREAMING_SNAKE_CASE =None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , _a ):
_SCREAMING_SNAKE_CASE =self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_SCREAMING_SNAKE_CASE =self.decoder_start_token_id
_SCREAMING_SNAKE_CASE =(
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''' ) else LegacySeqaSeqDataset
)
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_SCREAMING_SNAKE_CASE =self.hparams.eval_max_gen_length
else:
_SCREAMING_SNAKE_CASE =self.model.config.max_length
_SCREAMING_SNAKE_CASE =self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __UpperCamelCase ( self : Optional[int] , _a : Dict[str, torch.Tensor] ) -> Dict[str, List[str]]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(_a , Path(self.output_dir ) / '''text_batch.json''' )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / '''tok_batch.json''' )
_SCREAMING_SNAKE_CASE =True
return readable_batch
def __UpperCamelCase ( self : Any , _a : Optional[int] , **_a : Optional[Any] ) -> int:
"""simple docstring"""
return self.model(_a , **_a )
def __UpperCamelCase ( self : Optional[Any] , _a : List[int] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer.batch_decode(
_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
return lmap(str.strip , _a )
def __UpperCamelCase ( self : List[Any] , _a : dict ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.tokenizer.pad_token_id
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =batch['''input_ids'''], batch['''attention_mask''']
_SCREAMING_SNAKE_CASE =batch['''labels''']
if isinstance(self.model , _a ):
_SCREAMING_SNAKE_CASE =self.model._shift_right(_a )
else:
_SCREAMING_SNAKE_CASE =shift_tokens_right(_a , _a )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_SCREAMING_SNAKE_CASE =decoder_input_ids
self.save_readable_batch(_a )
_SCREAMING_SNAKE_CASE =self(_a , attention_mask=_a , decoder_input_ids=_a , use_cache=_a )
_SCREAMING_SNAKE_CASE =outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_SCREAMING_SNAKE_CASE =nn.CrossEntropyLoss(ignore_index=_a )
assert lm_logits.shape[-1] == self.vocab_size
_SCREAMING_SNAKE_CASE =ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =nn.functional.log_softmax(_a , dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =label_smoothed_nll_loss(
_a , _a , self.hparams.label_smoothing , ignore_index=_a )
return (loss,)
@property
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return self.tokenizer.pad_token_id
def __UpperCamelCase ( self : List[Any] , _a : Tuple , _a : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._step(_a )
_SCREAMING_SNAKE_CASE =dict(zip(self.loss_names , _a ) )
# tokens per batch
_SCREAMING_SNAKE_CASE =batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum()
_SCREAMING_SNAKE_CASE =batch['''input_ids'''].shape[0]
_SCREAMING_SNAKE_CASE =batch['''input_ids'''].eq(self.pad ).sum()
_SCREAMING_SNAKE_CASE =batch['''input_ids'''].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __UpperCamelCase ( self : str , _a : Dict , _a : List[Any] ) -> Dict:
"""simple docstring"""
return self._generative_step(_a )
def __UpperCamelCase ( self : Any , _a : Tuple , _a : List[str]="val" ) -> Dict:
"""simple docstring"""
self.step_count += 1
_SCREAMING_SNAKE_CASE ={k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_SCREAMING_SNAKE_CASE =losses['''loss''']
_SCREAMING_SNAKE_CASE ={
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
_SCREAMING_SNAKE_CASE =(
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_SCREAMING_SNAKE_CASE =torch.tensor(_a ).type_as(_a )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(_a )
_SCREAMING_SNAKE_CASE ={f"{prefix}_avg_{k}": x for k, x in losses.items()}
_SCREAMING_SNAKE_CASE =self.step_count
self.metrics[prefix].append(_a ) # callback writes this to self.metrics_save_path
_SCREAMING_SNAKE_CASE =flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"{prefix}_loss": loss,
f"{prefix}_{self.val_metric}": metric_tensor,
}
def __UpperCamelCase ( self : Any , _a : Optional[Any] , _a : int ) -> Dict:
"""simple docstring"""
return calculate_rouge(_a , _a )
def __UpperCamelCase ( self : Tuple , _a : dict ) -> dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_SCREAMING_SNAKE_CASE =self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=_a , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_SCREAMING_SNAKE_CASE =(time.time() - ta) / batch['''input_ids'''].shape[0]
_SCREAMING_SNAKE_CASE =self.ids_to_clean_text(_a )
_SCREAMING_SNAKE_CASE =self.ids_to_clean_text(batch['''labels'''] )
_SCREAMING_SNAKE_CASE =self._step(_a )
_SCREAMING_SNAKE_CASE =dict(zip(self.loss_names , _a ) )
_SCREAMING_SNAKE_CASE =self.calc_generative_metrics(_a , _a )
_SCREAMING_SNAKE_CASE =np.mean(lmap(_a , _a ) )
base_metrics.update(gen_time=_a , gen_len=_a , preds=_a , target=_a , **_a )
return base_metrics
def __UpperCamelCase ( self : str , _a : Any , _a : Tuple ) -> Any:
"""simple docstring"""
return self._generative_step(_a )
def __UpperCamelCase ( self : Optional[int] , _a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.validation_epoch_end(_a , prefix='''test''' )
def __UpperCamelCase ( self : str , _a : Optional[int] ) -> SeqaSeqDataset:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.n_obs[type_path]
_SCREAMING_SNAKE_CASE =self.target_lens[type_path]
_SCREAMING_SNAKE_CASE =self.dataset_class(
self.tokenizer , type_path=_a , n_obs=_a , max_target_length=_a , **self.dataset_kwargs , )
return dataset
def __UpperCamelCase ( self : Optional[Any] , _a : str , _a : int , _a : bool = False ) -> DataLoader:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dataset(_a )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_SCREAMING_SNAKE_CASE =dataset.make_sortish_sampler(_a , distributed=self.hparams.gpus > 1 )
return DataLoader(
_a , batch_size=_a , collate_fn=dataset.collate_fn , shuffle=_a , num_workers=self.num_workers , sampler=_a , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_SCREAMING_SNAKE_CASE =dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
_a , batch_sampler=_a , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
_a , batch_size=_a , collate_fn=dataset.collate_fn , shuffle=_a , num_workers=self.num_workers , sampler=_a , )
def __UpperCamelCase ( self : Union[str, Any] ) -> DataLoader:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=_a )
return dataloader
def __UpperCamelCase ( self : Dict ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size )
def __UpperCamelCase ( self : Union[str, Any] ) -> DataLoader:
"""simple docstring"""
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __UpperCamelCase ( _a : Any , _a : Any ) -> List[str]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(_a , _a )
add_generic_args(_a , _a )
parser.add_argument(
'''--max_source_length''' , default=1024 , type=_a , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
        parser.add_argument(
            '''--max_target_length''' , default=56 , type=_a , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--val_max_target_length''' , default=142 , type=_a , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
        parser.add_argument(
            '''--test_max_target_length''' , default=142 , type=_a , help=(
                '''The maximum total target sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            ) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''' )
parser.add_argument('''--freeze_embeds''' , action='''store_true''' )
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=_a )
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=_a )
parser.add_argument('''--max_tokens_per_batch''' , type=_a , default=_a )
parser.add_argument('''--logger_name''' , type=_a , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''' )
parser.add_argument('''--n_train''' , type=_a , default=-1 , required=_a , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_val''' , type=_a , default=500 , required=_a , help='''# examples. -1 means use all.''' )
parser.add_argument('''--n_test''' , type=_a , default=-1 , required=_a , help='''# examples. -1 means use all.''' )
        parser.add_argument(
            '''--task''' , type=_a , default='''summarization''' , required=_a , help='''Task to run: summarization or translation.''' )
parser.add_argument('''--label_smoothing''' , type=_a , default=0.0 , required=_a )
parser.add_argument('''--src_lang''' , type=_a , default='''''' , required=_a )
parser.add_argument('''--tgt_lang''' , type=_a , default='''''' , required=_a )
parser.add_argument('''--eval_beams''' , type=_a , default=_a , required=_a )
parser.add_argument(
'''--val_metric''' , type=_a , default=_a , required=_a , choices=['''bleu''', '''rouge2''', '''loss''', None] )
parser.add_argument('''--eval_max_gen_length''' , type=_a , default=_a , help='''never generate more than n tokens''' )
parser.add_argument('''--save_top_k''' , type=_a , default=1 , required=_a , help='''How many checkpoints to save''' )
parser.add_argument(
'''--early_stopping_patience''' , type=_a , default=-1 , required=_a , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
            ''' val_check_interval will affect it.'''
) , )
return parser
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "translation"
UpperCAmelCase = ["loss"]
UpperCAmelCase = ["bleu"]
UpperCAmelCase = "bleu"
def __init__( self : Union[str, Any] , _a : List[Any] , **_a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
super().__init__(_a , **_a )
_SCREAMING_SNAKE_CASE =hparams.src_lang
_SCREAMING_SNAKE_CASE =hparams.tgt_lang
def __UpperCamelCase ( self : Optional[Any] , _a : int , _a : List[Any] ) -> dict:
"""simple docstring"""
return calculate_bleu(_a , _a )
def lowerCamelCase( a__ ,a__=None):
Path(args.output_dir).mkdir(exist_ok=a__)
check_output_dir(a__ ,expected_items=3)
if model is None:
if "summarization" in args.task:
_SCREAMING_SNAKE_CASE =SummarizationModule(a__)
else:
_SCREAMING_SNAKE_CASE =TranslationModule(a__)
_SCREAMING_SNAKE_CASE =Path(args.data_dir).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir).startswith('''/tmp''')
or str(args.output_dir).startswith('''/var''')
):
_SCREAMING_SNAKE_CASE =True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_SCREAMING_SNAKE_CASE =os.environ.get('''WANDB_PROJECT''' ,a__)
_SCREAMING_SNAKE_CASE =WandbLogger(name=model.output_dir.name ,project=a__)
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_SCREAMING_SNAKE_CASE =WandbLogger(name=model.output_dir.name ,project=f"hf_{dataset}")
if args.early_stopping_patience >= 0:
_SCREAMING_SNAKE_CASE =get_early_stopping_callback(model.val_metric ,args.early_stopping_patience)
else:
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =args.val_metric == '''loss'''
_SCREAMING_SNAKE_CASE =generic_train(
a__ ,a__ ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback(
args.output_dir ,model.val_metric ,args.save_top_k ,a__) ,early_stopping_callback=a__ ,logger=a__ ,)
pickle_save(model.hparams ,model.output_dir / '''hparams.pkl''')
if not args.do_predict:
return model
_SCREAMING_SNAKE_CASE =''''''
_SCREAMING_SNAKE_CASE =sorted(glob.glob(os.path.join(args.output_dir ,'''*.ckpt''') ,recursive=a__))
if checkpoints:
_SCREAMING_SNAKE_CASE =checkpoints[-1]
_SCREAMING_SNAKE_CASE =checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams)
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
snake_case_ : Tuple = pl.Trainer.add_argparse_args(parser)
snake_case_ : str = SummarizationModule.add_model_specific_args(parser, os.getcwd())
snake_case_ : Union[str, Any] = parser.parse_args()
    main(args)

| 691 |
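
# A compact sketch of the label-smoothed NLL used in the training step above
# (standard formulation; the module's `label_smoothed_nll_loss` also handles
# ignore_index/padding, omitted here for brevity):
import torch
import torch.nn.functional as F

def _smoothed_nll(logits: torch.Tensor, target: torch.Tensor, epsilon: float) -> torch.Tensor:
    lprobs = F.log_softmax(logits, dim=-1)
    nll = -lprobs.gather(dim=-1, index=target.unsqueeze(-1)).squeeze(-1)
    smooth = -lprobs.mean(dim=-1)  # cross-entropy against a uniform prior
    return ((1.0 - epsilon) * nll + epsilon * smooth).mean()

_loss = _smoothed_nll(torch.randn(2, 5), torch.tensor([1, 3]), epsilon=0.1)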
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a single random input image in PIL format."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decode_strs, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50257)
        wp_input = torch.randn(1, 27, 30522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
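# Example usage (added for illustration; not part of the test file above): a minimal,
# hedged sketch of MgpstrProcessor in an OCR inference loop. "alibaba-damo/mgp-str-base"
# is the public reference checkpoint; "word.png" is a placeholder image path.
if __name__ == "__main__":
    from PIL import Image
    from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

    processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    pixel_values = processor(images=Image.open("word.png").convert("RGB"), return_tensors="pt").pixel_values
    outputs = model(pixel_values)
    print(processor.batch_decode(outputs.logits)["generated_text"])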
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
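# Example (added for illustration; not part of the original tests): the same round trip
# through the high-level `datasets` API. `Dataset.from_sql` and `Dataset.to_sql` are the
# public counterparts of SqlDatasetReader/SqlDatasetWriter; the database file names here
# are placeholders.
if __name__ == "__main__":
    from datasets import Dataset

    ds = Dataset.from_sql("dataset", "sqlite:///in.db")  # read table "dataset" from in.db
    ds.to_sql("dataset", "sqlite:///out.db")  # write it back out to another SQLite file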
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the key worldwide COVID-19 statistics from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
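# Usage note (added): the scrape depends on worldometers' current page layout, so a hedged
# variant with a timeout and explicit error handling is safer in practice. Nothing below is
# guaranteed by the original snippet; it is a defensive sketch of the same idea.
def world_covid19_stats_safe(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
    except requests.RequestException as error:
        # Network failure or non-2xx status: return an empty result instead of crashing
        print(f"Could not fetch {url}: {error}")
        return {}
    soup = BeautifulSoup(response.text, "html.parser")
    keys = soup.findAll("h1") + soup.findAll("span", {"class": "panel-title"})
    values = soup.findAll("div", {"class": "maincounter-number"}) + soup.findAll(
        "div", {"class": "number-table-main"}
    )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}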
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
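# Example usage (added; not part of the auto-mapping module itself): the auto classes
# resolve a checkpoint's config to the matching Flax architecture. "bert-base-cased" is
# just an illustrative public checkpoint.
if __name__ == "__main__":
    from transformers import FlaxAutoModel

    model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel
    print(type(model).__name__)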
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
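# Quick demonstration (added): each expected value is easy to verify by hand on 0b1101 = 13.
if __name__ == "__main__":
    assert set_bit(0b1101, 1) == 0b1111  # 13 -> 15
    assert clear_bit(0b1101, 2) == 0b1001  # 13 -> 9
    assert flip_bit(0b1101, 0) == 0b1100  # 13 -> 12
    assert is_bit_set(0b1101, 3) is True
    assert get_bit(0b1101, 1) == 0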
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
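# Sketch (added): in the transformers/diffusers `setup.py`, a table like the one above is
# consumed by a small helper that turns package names back into pinned requirement strings.
# This mirrors that pattern under the assumption that the dict above is named `deps`.
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]


# e.g. install_requires = deps_list("numpy", "requests", "Pillow")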
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
                self.assertIsInstance(retriever, RagRetriever)
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
                )
                out = retriever.retrieve(hidden_states, n_docs=1)
                self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
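# Example usage (added for illustration): a hedged sketch pairing the processor with the
# AltCLIP model. "BAAI/AltCLIP" is the reference checkpoint; the image path is a placeholder.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AltCLIPModel

    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")
    inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt", padding=True)
    outputs = model(**inputs)
    print(outputs.logits_per_image)  # image-text similarity scores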
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    # Approximates the arc length of fnc between x_start and x_end using `steps` segments
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
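# Sanity check (added): on a straight line the approximation is exact up to floating-point
# error, since every segment lies on the curve. f(x) = x from 0 to 1 has length sqrt(2).
if __name__ == "__main__":
    approx = line_length(lambda x: x, 0, 1, 10)
    assert abs(approx - math.sqrt(2)) < 1e-9, approx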
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
            ) , )
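# A hedged, standalone sketch of the shape contract these tests assert,
# exercised directly against the upstream ChineseCLIPImageProcessor (the
# parameter values below are illustrative, not the test fixtures above):
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

chinese_clip_processor = ChineseCLIPImageProcessor(
    size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224}
)
random_image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
pixel_values = chinese_clip_processor(images=random_image, return_tensors="np").pixel_values
print(pixel_values.shape)  # (1, 3, 224, 224): batch, channels, crop height, crop width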
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
snake_case_ : str = TypeVar('''T''')
snake_case_ : List[Any] = TypeVar('''U''')
class A__ ( Generic[T, U] ):
def __init__( self : List[Any] , _a : T | None , _a : U | None ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =key
_SCREAMING_SNAKE_CASE =val
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
def __repr__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class A__ ( Generic[T, U] ):
def __init__( self : List[Any] ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DoubleLinkedListNode(_a , _a )
_SCREAMING_SNAKE_CASE =DoubleLinkedListNode(_a , _a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.rear, self.head
def __repr__( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''DoubleLinkedList''']
_SCREAMING_SNAKE_CASE =self.head
while node.next is not None:
rep.append(str(_a ) )
_SCREAMING_SNAKE_CASE =node.next
rep.append(str(self.rear ) )
return ",\n ".join(_a )
def __UpperCamelCase ( self : Optional[Any] , _a : DoubleLinkedListNode[T, U] ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_SCREAMING_SNAKE_CASE =node
_SCREAMING_SNAKE_CASE =previous
_SCREAMING_SNAKE_CASE =node
_SCREAMING_SNAKE_CASE =self.rear
def __UpperCamelCase ( self : Optional[Any] , _a : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
"""simple docstring"""
if node.prev is None or node.next is None:
return None
_SCREAMING_SNAKE_CASE =node.next
_SCREAMING_SNAKE_CASE =node.prev
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
return node
class A__ ( Generic[T, U] ):
UpperCAmelCase = {}
def __init__( self : Any , _a : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DoubleLinkedList()
_SCREAMING_SNAKE_CASE =capacity
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __repr__( self : List[str] ) -> str:
"""simple docstring"""
return (
f"CacheInfo(hits={self.hits}, misses={self.miss}, "
f"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self : Dict , _a : T ) -> bool:
"""simple docstring"""
return key in self.cache
def __UpperCamelCase ( self : List[Any] , _a : T ) -> U | None:
"""simple docstring"""
if key in self.cache:
self.hits += 1
_SCREAMING_SNAKE_CASE =self.cache[key]
_SCREAMING_SNAKE_CASE =self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_a )
return node.val
self.miss += 1
return None
def __UpperCamelCase ( self : List[Any] , _a : T , _a : U ) -> None:
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_SCREAMING_SNAKE_CASE =self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(_a ) is not None
            )  # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_SCREAMING_SNAKE_CASE =DoubleLinkedListNode(_a , _a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_SCREAMING_SNAKE_CASE =self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_SCREAMING_SNAKE_CASE =value
self.list.add(_a )
@classmethod
def __UpperCamelCase ( cls : List[str] , _a : int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
"""simple docstring"""
def cache_decorator_inner(_a : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*_a : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
_SCREAMING_SNAKE_CASE =LRUCache(_a )
_SCREAMING_SNAKE_CASE =cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_SCREAMING_SNAKE_CASE =func(*_a )
cls.decorator_function_to_instance_map[func].put(args[0] , _a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_a , '''cache_info''' , _a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
    doctest.testmod()
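# For comparison, the same least-recently-used eviction semantics fit in a
# few lines on top of collections.OrderedDict; a standalone sketch,
# independent of the (obfuscated) class names in the snippet above:
from collections import OrderedDict

class TinyLRU:
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.data: OrderedDict = OrderedDict()

    def get(self, key):
        if key not in self.data:
            return None
        self.data.move_to_end(key)  # mark as most recently used
        return self.data[key]

    def put(self, key, value) -> None:
        if key in self.data:
            self.data.move_to_end(key)
        self.data[key] = value
        if len(self.data) > self.capacity:
            self.data.popitem(last=False)  # evict the least recently used entry

tiny_cache = TinyLRU(2)
tiny_cache.put(1, 1)
tiny_cache.put(2, 2)
tiny_cache.get(1)     # touch key 1, so key 2 becomes the eviction candidate
tiny_cache.put(3, 3)  # evicts key 2
assert tiny_cache.get(2) is None and tiny_cache.get(1) == 1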
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
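# The full truth table of the gate above can also be generated exhaustively:
from itertools import product

for input_1, input_2 in product((0, 1), repeat=2):
    print(f"and_gate({input_1}, {input_2}) = {and_gate(input_1, input_2)}")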
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : str = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class A__ ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = "swin"
UpperCAmelCase = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , _a : List[str]=224 , _a : Optional[int]=4 , _a : Dict=3 , _a : str=96 , _a : Union[str, Any]=[2, 2, 6, 2] , _a : str=[3, 6, 12, 24] , _a : int=7 , _a : str=4.0 , _a : int=True , _a : int=0.0 , _a : Union[str, Any]=0.0 , _a : Dict=0.1 , _a : List[Any]="gelu" , _a : Any=False , _a : List[str]=0.02 , _a : Dict=1E-5 , _a : Optional[int]=32 , _a : Tuple=None , _a : List[str]=None , **_a : Any , ) -> str:
"""simple docstring"""
super().__init__(**_a )
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embed_dim
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =num_heads
_SCREAMING_SNAKE_CASE =window_size
_SCREAMING_SNAKE_CASE =mlp_ratio
_SCREAMING_SNAKE_CASE =qkv_bias
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =drop_path_rate
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =use_absolute_embeddings
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_SCREAMING_SNAKE_CASE =int(embed_dim * 2 ** (len(_a ) - 1) )
_SCREAMING_SNAKE_CASE =['''stem'''] + [f"stage{idx}" for idx in range(1 , len(_a ) + 1 )]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_aligned_output_features_output_indices(
out_features=_a , out_indices=_a , stage_names=self.stage_names )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = version.parse("1.11" )
@property
def __UpperCamelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __UpperCamelCase ( self : Any ) -> float:
"""simple docstring"""
        return 1E-4
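# A hedged usage sketch with the upstream transformers names (SwinConfig /
# SwinModel rather than the placeholder class names in this snippet):
from transformers import SwinConfig, SwinModel

swin_config = SwinConfig(image_size=224, patch_size=4, embed_dim=96, depths=[2, 2, 6, 2])
swin_model = SwinModel(swin_config)
# hidden_size is derived exactly as in __init__ above: embed_dim * 2 ** (num_layers - 1)
print(swin_config.hidden_size)  # 96 * 2**3 = 768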
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
        assert len(metrics['''test'''] ) == 1
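# The argv-patching pattern used twice above, in isolation; a minimal
# standalone sketch of driving a CLI entry point from a test:
import sys
from unittest.mock import patch

def cli_main() -> list:
    return sys.argv[1:]

with patch.object(sys, "argv", ["finetune.py", "--num_train_epochs", "1"]):
    assert cli_main() == ["--num_train_epochs", "1"]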
import math


class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning vector by Euclidean distance."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        # the winner is the weight vector at the smaller squared distance
        return 0 if d0 <= d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Update the winning vector: pull it towards the training sample."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")


# running the main() function
if __name__ == "__main__":
    main()
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 0
UpperCAmelCase = False
UpperCAmelCase = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
snake_case_ : Optional[Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
snake_case_ : List[str] = Accelerator(kwargs_handlers=[ddp_scaler])
snake_case_ : Dict = torch.nn.Linear(1_00, 2_00)
snake_case_ : List[Any] = accelerator.prepare(model)
# Check the values changed in kwargs
snake_case_ : Dict = ''''''
snake_case_ : str = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
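# What `to_kwargs` does for a KwargsHandler dataclass: emit only the fields
# that differ from their defaults. A standalone sketch of that mechanism:
from dataclasses import dataclass, fields

@dataclass
class TinyHandler:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self) -> dict:
        defaults = TinyHandler()
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(defaults, f.name)
        }

assert TinyHandler(a=2, b=True).to_kwargs() == {"a": 2, "b": True}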
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
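# Quick demonstration: with 8-connectivity the three top-left cells form one
# island and the bottom-right cell a second one:
grid = [
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
]
print(Matrix(3, 4, grid).count_islands())  # 2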
class A__ :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if vertex not in self.adjacency:
_SCREAMING_SNAKE_CASE ={}
self.num_vertices += 1
def __UpperCamelCase ( self : Optional[int] , _a : Tuple , _a : Tuple , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =list(edges[i] )
edges.sort(key=lambda _a : e[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_SCREAMING_SNAKE_CASE =edges[i][2] + 1
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_SCREAMING_SNAKE_CASE =self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _a : List[str]=None , _a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Graph()
if vertices is None:
_SCREAMING_SNAKE_CASE =[]
if edges is None:
_SCREAMING_SNAKE_CASE =[]
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A__ :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE ={}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Dict , _a : Optional[Any] ) -> int:
"""simple docstring"""
if item in self.parent:
return self.find(_a )
_SCREAMING_SNAKE_CASE =item
_SCREAMING_SNAKE_CASE =0
return item
def __UpperCamelCase ( self : str , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_SCREAMING_SNAKE_CASE =self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , _a : Optional[int] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.find(_a )
_SCREAMING_SNAKE_CASE =self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] < self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_SCREAMING_SNAKE_CASE =roota
return roota
return None
@staticmethod
def __UpperCamelCase ( _a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =graph.num_vertices
_SCREAMING_SNAKE_CASE =Graph.UnionFind()
_SCREAMING_SNAKE_CASE =[]
while num_components > 1:
_SCREAMING_SNAKE_CASE ={}
for vertex in graph.get_vertices():
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =graph.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =union_find.find(_a )
_SCREAMING_SNAKE_CASE =union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_SCREAMING_SNAKE_CASE =num_components - 1
_SCREAMING_SNAKE_CASE =Graph.build(edges=_a )
        return mst
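# The union-find mechanics the MST routine above relies on, as a standalone,
# runnable sketch (independent of the obfuscated class names in this dump):
parent: dict = {}
rank: dict = {}

def find(x):
    parent.setdefault(x, x)
    rank.setdefault(x, 0)
    if parent[x] != x:
        parent[x] = find(parent[x])  # path compression
    return parent[x]

def union(x, y):
    root_x, root_y = find(x), find(y)
    if root_x == root_y:
        return
    if rank[root_x] < rank[root_y]:
        root_x, root_y = root_y, root_x
    parent[root_y] = root_x  # union by rank
    if rank[root_x] == rank[root_y]:
        rank[root_x] += 1

union(0, 1)
union(2, 3)
assert find(0) == find(1) and find(2) == find(3) and find(0) != find(2)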
from __future__ import annotations
def lowerCamelCase( a__ ,a__ ,a__ ,):
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError('''You cannot supply more or less than 2 values''')
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative in a semiconductor''')
elif hole_conc < 0:
raise ValueError('''Hole concentration cannot be negative in a semiconductor''')
elif intrinsic_conc < 0:
raise ValueError(
'''Intrinsic concentration cannot be negative in a semiconductor''')
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
    doctest.testmod()
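# Worked example: silicon at room temperature has n_i ≈ 1.5e10 cm^-3, so an
# n-type doping of 1e18 cm^-3 pins the hole concentration through the
# mass-action law p = n_i**2 / n:
print(lowerCamelCase(electron_conc=1e18, hole_conc=0, intrinsic_conc=1.5e10))
# -> ('hole_conc', 225.0)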
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def simple_accuracy( preds ,labels):
return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fpaa ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
_SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fpaa else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def lowerCamelCase( a__):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
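# The argument-parsing pattern at the top of main(), in isolation; a minimal
# sketch (the dataclass fields here are illustrative):
from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class TinyArgs:
    task_name: str = field(metadata={"help": "The task to train on."})
    max_seq_length: int = 128

(tiny_args,) = HfArgumentParser(TinyArgs).parse_args_into_dataclasses(
    args=["--task_name", "swag"]
)
print(tiny_args.task_name, tiny_args.max_seq_length)  # swag 128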
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
snake_case_ : str = datasets.utils.logging.get_logger(__name__)
@dataclass
class A__ ( datasets.BuilderConfig ):
UpperCAmelCase = 10000
UpperCAmelCase = None
UpperCAmelCase = None
class A__ ( datasets.ArrowBasedBuilder ):
UpperCAmelCase = ParquetConfig
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def __UpperCamelCase ( self : Optional[Any] , _a : Tuple ) -> List[Any]:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
_SCREAMING_SNAKE_CASE =dl_manager.download_and_extract(self.config.data_files )
if isinstance(_a , (str, list, tuple) ):
_SCREAMING_SNAKE_CASE =data_files
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_SCREAMING_SNAKE_CASE =[dl_manager.iter_files(_a ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
_SCREAMING_SNAKE_CASE =[]
for split_name, files in data_files.items():
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_SCREAMING_SNAKE_CASE =[dl_manager.iter_files(_a ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(_a ):
with open(_a , '''rb''' ) as f:
_SCREAMING_SNAKE_CASE =datasets.Features.from_arrow_schema(pq.read_schema(_a ) )
break
splits.append(datasets.SplitGenerator(name=_a , gen_kwargs={'''files''': files} ) )
return splits
def __UpperCamelCase ( self : Optional[int] , _a : pa.Table ) -> pa.Table:
"""simple docstring"""
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_SCREAMING_SNAKE_CASE =table_cast(_a , self.info.features.arrow_schema )
return pa_table
def __UpperCamelCase ( self : Optional[Any] , _a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ):
with open(_a , '''rb''' ) as f:
_SCREAMING_SNAKE_CASE =pq.ParquetFile(_a )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_SCREAMING_SNAKE_CASE =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield f"{file_idx}_{batch_idx}", self._cast_table(_a )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(_a )}: {e}" )
                raise
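# The batched Parquet read at the heart of _generate_tables above, in
# isolation (the file path is illustrative):
import pyarrow as pa
import pyarrow.parquet as pq

pq.write_table(pa.table({"x": list(range(10))}), "tiny.parquet")
parquet_file = pq.ParquetFile("tiny.parquet")
for record_batch in parquet_file.iter_batches(batch_size=4, columns=["x"]):
    print(pa.Table.from_batches([record_batch]).num_rows)  # 4, 4, 2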
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # integer halving keeps n an int
        return (b * b) % mod


# a prime number
p = 701

a = 10_00_00_00_00
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
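# Cross-check against Python's built-in three-argument pow(), which performs
# the same modular exponentiation (and hence yields the same Fermat inverse):
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)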
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(UpperCamelCase__ ) , "Tatoeba directory does not exist." )
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
return TatoebaConverter(save_dir=_a )
@slow
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=_a )
        assert mmeta["long_pair"] == "heb-eng"
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
if isinstance(_a , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =BitModelTester(self )
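# A hedged sketch of the inference path the integration test above exercises,
# using the upstream transformers names; "google/bit-50" is the usual public
# checkpoint and is an assumption here:
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

coco_image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
bit_processor = BitImageProcessor.from_pretrained("google/bit-50")
bit_model = BitForImageClassification.from_pretrained("google/bit-50")
bit_inputs = bit_processor(images=coco_image, return_tensors="pt")
with torch.no_grad():
    bit_logits = bit_model(**bit_inputs).logits
print(bit_model.config.id2label[bit_logits.argmax(-1).item()])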
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
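# Non-interactive check of harmonic_series:
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]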
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
    model_type = "rag"
    is_composition = True

    def __init__(self, vocab_size=None, is_encoder_decoder=True, prefix=None, bos_token_id=None, pad_token_id=None, eos_token_id=None, decoder_start_token_id=None, title_sep=" / ", doc_sep=" // ", n_docs=5, max_combined_length=300, retrieval_vector_size=768, retrieval_batch_size=8, dataset="wiki_dpr", dataset_split="train", index_name="compressed", index_path=None, passages_path=None, use_dummy_dataset=False, reduce_loss=False, label_smoothing=0.0, do_deduplication=True, exclude_bos_score=False, do_marginalize=False, output_retrieved=False, use_cache=True, forced_eos_token_id=None, **kwargs):
        """simple docstring"""
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        """simple docstring"""
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
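# Illustrative usage sketch (an addition): `get_duration` swaps a function's
# return value for its wall-clock duration in seconds. `_demo_sum` is a
# hypothetical example, not part of the benchmark suite.
@get_duration
def _demo_sum(n):
    return sum(range(n))

# _demo_sum(1_000_000) now returns the elapsed time rather than the sum.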
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
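# Illustrative usage sketch (an addition, not original code): building a small
# throwaway dataset; the features and the temporary path are placeholders.
# import tempfile
# features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
# with tempfile.TemporaryDirectory() as tmp_dir:
#     ds = generate_example_dataset(f"{tmp_dir}/dummy.arrow", features, num_examples=10)
#     assert len(ds) == 10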
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
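# Illustrative note (an addition): a Scene like the one above is rendered from
# the command line, e.g. `manim -pql <this_file>.py <SceneClassName>`; the file
# and class names here are placeholders.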
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def __UpperCamelCase ( *_a : Tuple , **_a : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
UpperCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
UpperCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def __UpperCamelCase ( self : Dict , _a : int , _a : Dict , _a : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =MaskGenerationPipeline(model=_a , image_processor=_a )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def __UpperCamelCase ( self : Union[str, Any] , _a : List[Any] , _a : Any ) -> List[Any]:
"""simple docstring"""
pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
pass
@slow
@require_torch
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' )
_SCREAMING_SNAKE_CASE =image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.04_44},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_21},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.01_67},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.01_32},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.00_53},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.99_67},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_93},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.99_09},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.98_79},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.98_34},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.97_16},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.96_12},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.95_99},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.95_52},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.95_32},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.95_16},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.94_99},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.94_83},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.94_64},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_43},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_43},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.94_08},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.93_35},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.93_26},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.92_62},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.89_99},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.89_86},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.89_84},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.88_73},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''facebook/sam-vit-huge'''
_SCREAMING_SNAKE_CASE =pipeline('''mask-generation''' , model=_a )
_SCREAMING_SNAKE_CASE =image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.04_44},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.02_10},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.01_67},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.01_32},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.00_53},
            ] , )
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
    def __UpperCamelCase ( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def __UpperCamelCase ( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> Optional[Any]:
        """simple docstring"""
        return token_ids_0 + [self.eos_token_id]

    def __UpperCamelCase ( self : Any , conversation : "Conversation" ) -> List[int]:
        """simple docstring"""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
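# Illustrative usage sketch (an addition): the class above corresponds to
# BlenderbotTokenizerFast in transformers; "facebook/blenderbot-3B" is a real
# Hub id, but the snippet itself is a sketch, not part of this file.
# from transformers import BlenderbotTokenizerFast, Conversation
# tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
# ids = tok._build_conversation_input_ids(Conversation("Hello, how are you?"))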
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
snake_case_ : Dict = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
snake_case_ : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
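# Illustrative invocation (an addition; the paths are placeholders), assuming
# this script is saved as convert_xlnet_original_tf_checkpoint_to_pytorch.py:
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sst-2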
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
snake_case_ : List[str] = logging.get_logger(__name__)
snake_case_ : Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class A__ ( UpperCamelCase__ ):
def __init__( self : Union[str, Any] , _a : str=None , _a : Optional[int]=None , *_a : Union[str, Any] , **_a : Optional[int] ) -> int:
"""simple docstring"""
super().__init__(*_a , **_a )
if config is None:
assert isinstance(self.model , _a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
_SCREAMING_SNAKE_CASE =self.model.config
else:
_SCREAMING_SNAKE_CASE =config
_SCREAMING_SNAKE_CASE =data_args
_SCREAMING_SNAKE_CASE =self.config.tgt_vocab_size if isinstance(self.config , _a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
''' padding..''' )
if self.args.label_smoothing == 0:
_SCREAMING_SNAKE_CASE =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_SCREAMING_SNAKE_CASE =label_smoothed_nll_loss
def __UpperCamelCase ( self : Tuple , _a : int ) -> Union[str, Any]:
"""simple docstring"""
if self.optimizer is None:
_SCREAMING_SNAKE_CASE =['''bias''', '''LayerNorm.weight''']
_SCREAMING_SNAKE_CASE =[
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
_SCREAMING_SNAKE_CASE =Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_SCREAMING_SNAKE_CASE =Adafactor
_SCREAMING_SNAKE_CASE ={'''scale_parameter''': False, '''relative_step''': False}
else:
_SCREAMING_SNAKE_CASE =AdamW
_SCREAMING_SNAKE_CASE ={
                '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
'''eps''': self.args.adam_epsilon,
}
_SCREAMING_SNAKE_CASE =self.args.learning_rate
if self.sharded_ddp:
_SCREAMING_SNAKE_CASE =OSS(
params=_a , optim=_a , **_a , )
else:
_SCREAMING_SNAKE_CASE =optimizer_cls(_a , **_a )
if self.lr_scheduler is None:
_SCREAMING_SNAKE_CASE =self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def __UpperCamelCase ( self : Tuple , _a : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_SCREAMING_SNAKE_CASE =schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_SCREAMING_SNAKE_CASE =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
_SCREAMING_SNAKE_CASE =schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=_a )
return scheduler
def __UpperCamelCase ( self : Tuple ) -> Optional[torch.utils.data.Sampler]:
"""simple docstring"""
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __UpperCamelCase ( self : Optional[int] , _a : Optional[Any] , _a : Optional[Any] , _a : Optional[int] ) -> str:
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_SCREAMING_SNAKE_CASE =model(**_a , use_cache=_a )[0]
_SCREAMING_SNAKE_CASE =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =model(**_a , labels=_a , use_cache=_a )[:2]
else:
# compute label smoothed loss
_SCREAMING_SNAKE_CASE =model(**_a , use_cache=_a )[0]
_SCREAMING_SNAKE_CASE =torch.nn.functional.log_softmax(_a , dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.loss_fn(_a , _a , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __UpperCamelCase ( self : int , _a : Union[str, Any] , _a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =inputs.pop('''labels''' )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._compute_loss(_a , _a , _a )
return loss
def __UpperCamelCase ( self : Optional[int] , _a : nn.Module , _a : Dict[str, Union[torch.Tensor, Any]] , _a : bool , _a : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._prepare_inputs(_a )
_SCREAMING_SNAKE_CASE ={
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_SCREAMING_SNAKE_CASE =self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **_a , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_SCREAMING_SNAKE_CASE =self._pad_tensors_to_max_len(_a , gen_kwargs['''max_length'''] )
_SCREAMING_SNAKE_CASE =inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._compute_loss(_a , _a , _a )
_SCREAMING_SNAKE_CASE =loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_SCREAMING_SNAKE_CASE =generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_SCREAMING_SNAKE_CASE =self._pad_tensors_to_max_len(_a , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def __UpperCamelCase ( self : str , _a : Optional[int] , _a : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
f" padded to `max_length`={max_length}" )
_SCREAMING_SNAKE_CASE =pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
_SCREAMING_SNAKE_CASE =tensor
        return padded_tensor
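# Worked example (an addition): the padding helper above right-pads along the
# sequence dimension, e.g. with pad_token_id=0 and max_length=4,
# tensor([[5, 6]]) becomes tensor([[5, 6, 0, 0]]).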
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
    write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch.")
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width)
    return patches.unsqueeze(0)
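# Illustrative shape check (an addition, not original code): a (C=1, H=4, W=6)
# input with 2x2 patches yields a (1, H/2, W/2, C*2*2) tensor of flattened patches.
# x = torch.arange(24, dtype=torch.float32).reshape(1, 4, 6)
# assert torch_extract_patches(x, 2, 2).shape == (1, 2, 3, 4)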
def render_text(text: str, text_size: int = 36, text_color: str = "black", background_color: str = "white", left_padding: int = 5, right_padding: int = 5, top_padding: int = 5, bottom_padding: int = 5, font_bytes: Optional[bytes] = None, font_path: Optional[str] = None) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
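# Illustrative usage sketch (an addition): rendering a VQA prompt to an image;
# the returned PIL image grows with the padding and the wrapped text.
# prompt_img = render_text("What is the total amount?", text_size=36)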
def render_header(image: np.ndarray, header: str, **kwargs):
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = ["flattened_patches"]
def __init__( self : Optional[int] , _a : bool = True , _a : bool = True , _a : Dict[str, int] = None , _a : int = 2048 , _a : bool = False , **_a : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**_a )
_SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =do_convert_rgb
_SCREAMING_SNAKE_CASE =max_patches
_SCREAMING_SNAKE_CASE =is_vqa
def __UpperCamelCase ( self : str , _a : np.ndarray , _a : int , _a : dict , **_a : int ) -> np.ndarray:
"""simple docstring"""
requires_backends(self.extract_flattened_patches , '''torch''' )
_check_torch_version()
# convert to torch
_SCREAMING_SNAKE_CASE =to_channel_dimension_format(_a , ChannelDimension.FIRST )
_SCREAMING_SNAKE_CASE =torch.from_numpy(_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =patch_size['''height'''], patch_size['''width''']
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =get_image_size(_a )
# maximize scale s.t.
_SCREAMING_SNAKE_CASE =math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
_SCREAMING_SNAKE_CASE =max(min(math.floor(scale * image_height / patch_height ) , _a ) , 1 )
_SCREAMING_SNAKE_CASE =max(min(math.floor(scale * image_width / patch_width ) , _a ) , 1 )
_SCREAMING_SNAKE_CASE =max(num_feasible_rows * patch_height , 1 )
_SCREAMING_SNAKE_CASE =max(num_feasible_cols * patch_width , 1 )
_SCREAMING_SNAKE_CASE =torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='''bilinear''' , align_corners=_a , antialias=_a , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
_SCREAMING_SNAKE_CASE =torch_extract_patches(_a , _a , _a )
_SCREAMING_SNAKE_CASE =patches.shape
_SCREAMING_SNAKE_CASE =patches_shape[1]
_SCREAMING_SNAKE_CASE =patches_shape[2]
_SCREAMING_SNAKE_CASE =patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
_SCREAMING_SNAKE_CASE =patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
_SCREAMING_SNAKE_CASE =torch.arange(_a ).reshape([rows, 1] ).repeat(1 , _a ).reshape([rows * columns, 1] )
_SCREAMING_SNAKE_CASE =torch.arange(_a ).reshape([1, columns] ).repeat(_a , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
# [rows * columns, 2 + patch_height * patch_width * image_channels]
_SCREAMING_SNAKE_CASE =torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
_SCREAMING_SNAKE_CASE =torch.nn.functional.pad(_a , [0, 0, 0, max_patches - (rows * columns)] ).float()
_SCREAMING_SNAKE_CASE =to_numpy_array(_a )
return result
def __UpperCamelCase ( self : Union[str, Any] , _a : np.ndarray , _a : Optional[Union[str, ChannelDimension]] = None , **_a : str ) -> np.ndarray:
"""simple docstring"""
        if image.dtype == np.uint8:
            image = image.astype(np.float32)
# take mean across the whole `image`
_SCREAMING_SNAKE_CASE =np.mean(_a )
_SCREAMING_SNAKE_CASE =np.std(_a )
_SCREAMING_SNAKE_CASE =max(_a , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(_a , mean=_a , std=_a , **_a )
def __UpperCamelCase ( self : Any , _a : ImageInput , _a : Optional[str] = None , _a : bool = None , _a : Optional[bool] = None , _a : Optional[int] = None , _a : Optional[Dict[str, int]] = None , _a : Optional[Union[str, TensorType]] = None , _a : ChannelDimension = ChannelDimension.FIRST , **_a : Tuple , ) -> ImageInput:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_SCREAMING_SNAKE_CASE =patch_size if patch_size is not None else self.patch_size
_SCREAMING_SNAKE_CASE =max_patches if max_patches is not None else self.max_patches
_SCREAMING_SNAKE_CASE =self.is_vqa
if kwargs.get('''data_format''' , _a ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
_SCREAMING_SNAKE_CASE =make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_SCREAMING_SNAKE_CASE =[convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE =[to_numpy_array(_a ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''font_bytes''' , _a )
_SCREAMING_SNAKE_CASE =kwargs.pop('''font_path''' , _a )
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =[header_text] * len(_a )
_SCREAMING_SNAKE_CASE =[
render_header(_a , header_text[i] , font_bytes=_a , font_path=_a )
for i, image in enumerate(_a )
]
if do_normalize:
_SCREAMING_SNAKE_CASE =[self.normalize(image=_a ) for image in images]
# convert to torch tensor and permute
_SCREAMING_SNAKE_CASE =[
self.extract_flattened_patches(image=_a , max_patches=_a , patch_size=_a )
for image in images
]
# create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]
_SCREAMING_SNAKE_CASE =BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} , tensor_type=_a )
        return encoded_outputs
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
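# Illustrative note (an addition): _LazyModule replaces this module object in
# sys.modules, so `from ... import TapexTokenizer` only triggers the real
# import of tokenization_tapex on first attribute access.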
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
snake_case_ : int = {
'''A''': ['''B''', '''C''', '''D'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F'''],
'''D''': ['''B''', '''D'''],
'''E''': ['''B''', '''F'''],
'''F''': ['''C''', '''E''', '''G'''],
'''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
    print(depth_first_search(G, '''A'''))
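# Illustrative note (an addition): every vertex is reachable from "A", so the
# call above prints the full vertex set {"A", "B", "C", "D", "E", "F", "G"}
# (set ordering varies between runs).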
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
snake_case_ : Union[str, Any] = datasets.utils.logging.get_logger(__name__)
class A__ ( folder_based_builder.FolderBasedBuilderConfig ):
UpperCAmelCase = None
UpperCAmelCase = None
class A__ ( folder_based_builder.FolderBasedBuilder ):
UpperCAmelCase = datasets.Audio()
UpperCAmelCase = "audio"
UpperCAmelCase = AudioFolderConfig
UpperCAmelCase = 42 # definition at the bottom of the script
UpperCAmelCase = AudioClassification(audio_column="audio" , label_column="label" )
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
snake_case_ : int = AUDIO_EXTENSIONS
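# Illustrative usage sketch (an addition): this builder backs the "audiofolder"
# loading script, e.g.
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="path/to/folder")  # placeholder path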
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
        return outputs
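
# Note (added for clarity): with patience > 0 at inference time, the loop above
# exits as soon as `patience` consecutive internal classifiers agree on the same
# prediction (the PABEE early-exit criterion). During training, every internal
# classifier contributes a loss weighted by its depth (ix + 1), so deeper
# classifiers count for more in the total.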
def solution(n: int = 100) -> int:
    """Return the number of distinct terms a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print('''Number of terms ''', solution(int(str(input()).strip())))
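
# Worked example (added): for n = 5 the distinct powers a**b with 2 <= a, b <= 5
# are {4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125}, that is
# 15 terms, matching the Project Euler 29 statement: solution(5) == 15.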
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : str = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'''
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''', type=int, default=1, help='''Number of TPU cores to use (1 or 8).''')

    # positional
    parser.add_argument(
        '''training_script''',
        type=str,
        help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ),
    )

    # rest from the training program
    parser.add_argument('''training_script_args''', nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
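
# Example invocation (added; the script name and flag values are illustrative):
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --learning_rate 3e-5
#
# The patched sys.argv forwards --tpu_num_cores to each spawned process.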
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
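
# Note (added): the batch_decode test above feeds three logit streams matching
# MGP-STR's three recognition heads: character-level (vocab size 38), BPE
# (vocab size 50257) and WordPiece (vocab size 30522); the processor fuses them
# into a single generated_text plus per-head predictions and scores.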
class A__ :
def __init__( self : List[str] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE ={}
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if vertex not in self.adjacency:
_SCREAMING_SNAKE_CASE ={}
self.num_vertices += 1
def __UpperCamelCase ( self : Optional[int] , _a : Tuple , _a : Tuple , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.add_vertex(_a )
self.add_vertex(_a )
if head == tail:
return
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for i in range(len(_a ) ):
_SCREAMING_SNAKE_CASE =list(edges[i] )
edges.sort(key=lambda _a : e[2] )
for i in range(len(_a ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_SCREAMING_SNAKE_CASE =edges[i][2] + 1
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =weight
_SCREAMING_SNAKE_CASE =weight
def __str__( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_SCREAMING_SNAKE_CASE =self.adjacency[head][tail]
string += f"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
return self.adjacency.keys()
@staticmethod
def __UpperCamelCase ( _a : List[str]=None , _a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Graph()
if vertices is None:
_SCREAMING_SNAKE_CASE =[]
if edges is None:
_SCREAMING_SNAKE_CASE =[]
for vertex in vertices:
g.add_vertex(_a )
for edge in edges:
g.add_edge(*_a )
return g
class A__ :
def __init__( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE ={}
def __len__( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return len(self.parent )
def __UpperCamelCase ( self : Dict , _a : Optional[Any] ) -> int:
"""simple docstring"""
if item in self.parent:
return self.find(_a )
_SCREAMING_SNAKE_CASE =item
_SCREAMING_SNAKE_CASE =0
return item
def __UpperCamelCase ( self : str , _a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if item not in self.parent:
return self.make_set(_a )
if item != self.parent[item]:
_SCREAMING_SNAKE_CASE =self.find(self.parent[item] )
return self.parent[item]
def __UpperCamelCase ( self : Dict , _a : Optional[int] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.find(_a )
_SCREAMING_SNAKE_CASE =self.find(_a )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] < self.rank[roota]:
_SCREAMING_SNAKE_CASE =roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_SCREAMING_SNAKE_CASE =roota
return roota
return None
@staticmethod
def __UpperCamelCase ( _a : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =graph.num_vertices
_SCREAMING_SNAKE_CASE =Graph.UnionFind()
_SCREAMING_SNAKE_CASE =[]
while num_components > 1:
_SCREAMING_SNAKE_CASE ={}
for vertex in graph.get_vertices():
_SCREAMING_SNAKE_CASE =-1
_SCREAMING_SNAKE_CASE =graph.get_edges()
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
edges.remove((tail, head, weight) )
for edge in edges:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =edge
_SCREAMING_SNAKE_CASE =union_find.find(_a )
_SCREAMING_SNAKE_CASE =union_find.find(_a )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_SCREAMING_SNAKE_CASE =[head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =cheap_edge[vertex]
if union_find.find(_a ) != union_find.find(_a ):
union_find.union(_a , _a )
mst_edges.append(cheap_edge[vertex] )
_SCREAMING_SNAKE_CASE =num_components - 1
_SCREAMING_SNAKE_CASE =Graph.build(edges=_a )
        return mst
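
# Usage sketch (added; illustrative). In the non-obfuscated source this file
# appears to derive from, the unnamed static methods above correspond to
# Graph.build and a Boruvka minimum-spanning-tree routine (we believe it is
# called boruvka_mst, so treat that name as an assumption):
#
#   g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 1), (1, 4, 3)])
#   mst = Graph.boruvka_mst(g)
#   print(mst)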
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text , '''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''})
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values)}


if __name__ == "__main__":
    print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
        print(f"""{key}\n{value}\n""")
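
# Note (added): the returned dict maps each scraped headline or panel title to
# its displayed count as a raw string; the exact keys depend on the live page
# markup, so the scraper will need updating if worldometers changes its HTML.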
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
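
# Usage sketch (added; illustrative). In the original file this class is
# BlenderbotTokenizerFast, and the checkpoint name comes from the pretrained
# maps above:
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained('''facebook/blenderbot-3B''')
#   ids = tokenizer(''' Hello world''').input_ids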
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
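
# Worked examples (added; note that doctest.testmod() above is a no-op until
# the functions carry docstring examples):
#
#   set_bit(0b1101, 1)     -> 0b1111 (15)
#   clear_bit(0b1111, 1)   -> 0b1101 (13)
#   flip_bit(0b1101, 1)    -> 0b1111 (15)
#   is_bit_set(0b1010, 3)  -> True
#   get_bit(0b1010, 0)     -> 0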
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : int = {'''vocab_file''': '''spiece.model'''}
snake_case_ : List[str] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
snake_case_ : Any = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = []
def __init__( self : Optional[Any] , _a : str , _a : Dict="<unk>" , _a : Tuple="<s>" , _a : Optional[int]="</s>" , _a : List[Any]="<pad>" , _a : Dict="[SEP]" , _a : int="[MASK]" , _a : List[Any]="[CLS]" , _a : Optional[Dict[str, Any]] = None , **_a : List[str] , ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
_SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , mask_token=_a , cls_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_SCREAMING_SNAKE_CASE =vocab_file
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
return self.sp_model.get_piece_size()
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.__dict__.copy()
_SCREAMING_SNAKE_CASE =None
return state
def __setstate__( self : Dict , _a : Tuple ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self : Tuple , _a : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_a , out_type=_a )
def __UpperCamelCase ( self : int , _a : Optional[int] ) -> List[Any]:
"""simple docstring"""
return self.sp_model.piece_to_id(_a )
def __UpperCamelCase ( self : Any , _a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.sp_model.IdToPiece(_a )
return token
def __UpperCamelCase ( self : Union[str, Any] , _a : Any ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =''''''
_SCREAMING_SNAKE_CASE =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =[]
else:
current_sub_tokens.append(_a )
_SCREAMING_SNAKE_CASE =False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : bool = False , _a : bool = None , _a : bool = True , **_a : int , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.pop('''use_source_tokenizer''' , _a )
_SCREAMING_SNAKE_CASE =self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
_SCREAMING_SNAKE_CASE =[]
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_SCREAMING_SNAKE_CASE =re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_a ) )
else:
_SCREAMING_SNAKE_CASE =''''''.join(_a )
_SCREAMING_SNAKE_CASE =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_SCREAMING_SNAKE_CASE =self.clean_up_tokenization(_a )
return clean_text
else:
return text
def __UpperCamelCase ( self : Any , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE =os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
_SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def __UpperCamelCase ( self : Union[str, Any] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : Union[str, Any] , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def __UpperCamelCase ( self : Optional[Any] , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
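
# Usage sketch (added; illustrative). In the original file this class is
# BigBirdTokenizer, and the checkpoints in the maps above load as:
#
#   tokenizer = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
#   ids = tokenizer('''Hello world''').input_ids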
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
len(_a ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
            all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary.
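
# Note (added): once a context-encoder tokenizer is set on the retriever, its
# output grows from the usual four fields (context_input_ids,
# context_attention_mask, retrieved_doc_embeds, doc_ids) to six, adding
# tokenized_doc_ids and tokenized_doc_attention_mask, which is exactly what the
# final two assertions above check.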
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : List[Any] = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "audio-spectrogram-transformer"
def __init__( self : Tuple , _a : Optional[int]=768 , _a : Any=12 , _a : Any=12 , _a : Union[str, Any]=3072 , _a : Union[str, Any]="gelu" , _a : Dict=0.0 , _a : Optional[int]=0.0 , _a : List[str]=0.02 , _a : int=1E-12 , _a : str=16 , _a : Optional[Any]=True , _a : Tuple=10 , _a : List[Any]=10 , _a : Any=1024 , _a : List[Any]=128 , **_a : Any , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**_a )
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =layer_norm_eps
_SCREAMING_SNAKE_CASE =patch_size
_SCREAMING_SNAKE_CASE =qkv_bias
_SCREAMING_SNAKE_CASE =frequency_stride
_SCREAMING_SNAKE_CASE =time_stride
_SCREAMING_SNAKE_CASE =max_length
        _SCREAMING_SNAKE_CASE =num_mel_bins
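
# Usage sketch (added; illustrative). In the original file this class is
# ASTConfig (Audio Spectrogram Transformer), so a default configuration is
# built as:
#
#   config = ASTConfig()
#   print(config.max_length, config.num_mel_bins)  # 1024 128 by default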
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = KandinskyImgaImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            '''num_train_timesteps''': 1000,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.00085,
            '''beta_end''': 0.012,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        """simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_img2img_frog.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
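# Note on the integration flow above: Kandinsky 2.1 is a two-stage system.
# The prior pipeline maps the text prompt to CLIP image embeddings, and the
# img2img pipeline then denoises the init image conditioned on those embeddings.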
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''x = 3'''
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
assert result == 3
self.assertDictEqual(_a , {'''x''': 3} )
_SCREAMING_SNAKE_CASE ='''x = y'''
_SCREAMING_SNAKE_CASE ={'''y''': 5}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_a , {'''x''': 5, '''y''': 5} )
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''y = add_two(x)'''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {'''add_two''': add_two} , state=_a )
assert result == 5
self.assertDictEqual(_a , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
assert result is None
assert "tried to execute add_two" in out.out
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''x = 3'''
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
assert result == 3
self.assertDictEqual(_a , {'''x''': 3} )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''test_dict = {\'x\': x, \'y\': add_two(x)}'''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {'''add_two''': add_two} , state=_a )
self.assertDictEqual(_a , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(_a , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''x = 3\ny = 5'''
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_a , {'''x''': 3, '''y''': 5} )
def __UpperCamelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''text = f\'This is x: {x}.\''''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_a , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''if x <= 3:\n y = 2\nelse:\n y = 5'''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_a , {'''x''': 3, '''y''': 2} )
_SCREAMING_SNAKE_CASE ={'''x''': 8}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_a , {'''x''': 8, '''y''': 5} )
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''test_list = [x, add_two(x)]'''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {'''add_two''': add_two} , state=_a )
self.assertListEqual(_a , [3, 5] )
self.assertDictEqual(_a , {'''x''': 3, '''test_list''': [3, 5]} )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''y = x'''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {} , state=_a )
assert result == 3
self.assertDictEqual(_a , {'''x''': 3, '''y''': 3} )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''test_list = [x, add_two(x)]\ntest_list[1]'''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {'''add_two''': add_two} , state=_a )
assert result == 5
self.assertDictEqual(_a , {'''x''': 3, '''test_list''': [3, 5]} )
_SCREAMING_SNAKE_CASE ='''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
_SCREAMING_SNAKE_CASE ={'''x''': 3}
_SCREAMING_SNAKE_CASE =evaluate(_a , {'''add_two''': add_two} , state=_a )
assert result == 5
self.assertDictEqual(_a , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''x = 0\nfor i in range(3):\n x = i'''
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =evaluate(_a , {'''range''': range} , state=_a )
assert result == 2
        self.assertDictEqual(_a , {'''x''': 2, '''i''': 2} )
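# Taken together, these cases pin down the interpreter contract exercised
# above: evaluate() returns the value of the last statement, mutates the
# passed-in state dict in place, and only calls functions that were
# explicitly supplied in the tools dict.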
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , do_convert_rgb=True , ):
        """simple docstring"""
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict( self ):
        """simple docstring"""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }
    def prepare_inputs( self , equal_resolution=False , numpify=False , torchify=False ):
        """simple docstring"""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size ):
                image_inputs.append(
                    np.random.randint(
                        255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size ):
                width , height = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
                image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uint8 ) )
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , do_center_crop=True )
    @property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_convert_rgb''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self ):
"""simple docstring"""
pass
    def test_call_pil( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_numpy( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def test_call_pytorch( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
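# All three call tests above check the same contract: a single image encodes
# to a (1, C, H, W) tensor and a batch to (B, C, H, W), regardless of whether
# the inputs arrive as PIL images, numpy arrays, or torch tensors.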
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=True )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
        self.assertTrue(hasattr(image_processing , '''do_convert_rgb''' ) )
    def test_batch_feature( self ):
"""simple docstring"""
pass
    def test_call_pil_four_channels( self ):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree ( Generic[T] ):
    def __init__( self , arr : list[T] , fnc : Callable[[T, T], T] ) -> None:
        """simple docstring"""
        any_type : Any | None = None
        self.N : int = len(arr )
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()
    def build( self ) -> None:
        """simple docstring"""
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p : int , v : T ) -> None:
        """simple docstring"""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l : int , r : int ) -> T | None:  # noqa: E741
        """simple docstring"""
        l , r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2, (r - 1) // 2
        return res
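# Layout note for the class above: leaves live at indices N..2N-1 and node p
# combines its children 2p and 2p+1, so build() is O(N) while update() and
# query() each walk O(log N) levels (every loop iteration halves the index).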
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments():
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
    test_all_segments()
def and_gate(input_1, input_2):
    return int((input_1, input_2).count(0) == 0)
def test_and_gate():
assert and_gate(0 ,0) == 0
assert and_gate(0 ,1) == 0
assert and_gate(1 ,0) == 0
assert and_gate(1 ,1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
    print(and_gate(1, 1))
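# Truth table for reference: and_gate(0, 0) == 0, and_gate(0, 1) == 0,
# and_gate(1, 0) == 0, and_gate(1, 1) == 1. The tuple trick works because
# (input_1, input_2).count(0) is zero only when neither input is 0.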
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
class TestMbartCc25Enro ( TestCasePlus ):
    def setUp( self ):
"""simple docstring"""
super().setUp()
        data_cached = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher ( TestCasePlus ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
        assert len(metrics['''test'''] ) == 1
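# Both tests above share one harness: rewrite the example bash script's env
# placeholders, parse the result with the real argparse setup, train briefly,
# then assert on the saved metrics JSON and the Lightning checkpoint contents.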
import os
from datetime import datetime as dt
from github import Github
snake_case_ : List[Any] = [
'''good first issue''',
'''feature request''',
'''wip''',
]
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/accelerate''')
    open_issues = repo.get_issues(state='''open''')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] ,key=lambda i: i.created_at ,reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''')
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''')
if __name__ == "__main__":
    main()
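# The two branches above implement a two-step stale policy: a bot-commented
# issue with 7+ days of silence (and 30+ days of age) is closed, while 23+
# days of general inactivity only triggers the stale warning comment first.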
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass( KwargsHandler ):
    a : int = 0
    b : bool = False
    c : float = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
        self.assertDictEqual(MockClass(a=2 , b=True ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        scaler_handler = GradScalerKwargs(init_scale=1024 , growth_factor=2 )
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
        print(accelerator.use_fp16 )
        scaler = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
        self.assertEqual(scaler._enabled , True )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
        cmd = ['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
        execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
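# The module-level block above is what the @require_multi_gpu test launches
# via torchrun: each worker builds an Accelerator whose kwargs_handlers
# override the DDP defaults, then inspects the wrapped model to verify them.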
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self ):
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] , **_a : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str , **_a : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
_SCREAMING_SNAKE_CASE =[0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_SCREAMING_SNAKE_CASE =tokenizer(_a , max_length=len(_a ) , padding=_a , return_tensors='''pt''' )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
_SCREAMING_SNAKE_CASE =batch.input_ids.tolist()[0]
self.assertListEqual(_a , _a )
# Test that special tokens are reset
@require_torch
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_SCREAMING_SNAKE_CASE =tokenizer(_a , padding=_a , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , _a )
self.assertIn('''attention_mask''' , _a )
self.assertNotIn('''labels''' , _a )
self.assertNotIn('''decoder_attention_mask''' , _a )
@require_torch
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_SCREAMING_SNAKE_CASE =tokenizer(text_target=_a , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_SCREAMING_SNAKE_CASE =tokenizer(
['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=_a , truncation=_a , return_tensors='''pt''' )
self.assertIsInstance(_a , _a )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''A long paragraph for summarization.''']
_SCREAMING_SNAKE_CASE =[
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_SCREAMING_SNAKE_CASE =tokenizer(_a , text_target=_a , return_tensors='''pt''' )
_SCREAMING_SNAKE_CASE =inputs['''input_ids''']
_SCREAMING_SNAKE_CASE =inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_SCREAMING_SNAKE_CASE =self.rust_tokenizer_class.from_pretrained(_a , **_a )
_SCREAMING_SNAKE_CASE =self.tokenizer_class.from_pretrained(_a , **_a )
_SCREAMING_SNAKE_CASE ='''A, <mask> AllenNLP sentence.'''
_SCREAMING_SNAKE_CASE =tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
_SCREAMING_SNAKE_CASE =tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
_SCREAMING_SNAKE_CASE =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_SCREAMING_SNAKE_CASE =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
                _a , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
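# The final test above pins slow/fast parity on a known edge case: both
# tokenizers must emit identical ids and tokens for the space before <mask>
# in "A, <mask> AllenNLP sentence.".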
class Graph :
    def __init__( self ):
        """simple docstring"""
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}
    def add_vertex( self , vertex ):
        """simple docstring"""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1
    def add_edge( self , head , tail , weight ):
        """simple docstring"""
        self.add_vertex(head )
        self.add_vertex(tail )
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight
    def distinct_weight( self ):
        """simple docstring"""
        edges = self.get_edges()
        for edge in edges:
            head , tail , weight = edge
            edges.remove((tail, head, weight) )
        for i in range(len(edges ) ):
            edges[i] = list(edges[i] )
        edges.sort(key=lambda e : e[2] )
        for i in range(len(edges ) - 1 ):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head , tail , weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight
    def __str__( self ):
        """simple docstring"""
        string = ''''''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip('''\n''' )
    def get_edges( self ):
        """simple docstring"""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]) )
        return output
    def get_vertices( self ):
        """simple docstring"""
        return self.adjacency.keys()
    @staticmethod
    def build( vertices=None , edges=None ):
        """simple docstring"""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex )
        for edge in edges:
            g.add_edge(*edge )
        return g
    class UnionFind :
        def __init__( self ):
            """simple docstring"""
            self.parent = {}
            self.rank = {}
        def __len__( self ):
            """simple docstring"""
            return len(self.parent )
        def make_set( self , item ):
            """simple docstring"""
            if item in self.parent:
                return self.find(item )
            self.parent[item] = item
            self.rank[item] = 0
            return item
        def find( self , item ):
            """simple docstring"""
            if item not in self.parent:
                return self.make_set(item )
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item] )
            return self.parent[item]
        def union( self , item1 , item2 ):
            """simple docstring"""
            root1 = self.find(item1 )
            root2 = self.find(item2 )
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None
    @staticmethod
    def boruvka_mst( graph ):
        """simple docstring"""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head , tail , weight = edge
                edges.remove((tail, head, weight) )
            for edge in edges:
                head , tail , weight = edge
                set1 = union_find.find(head )
                set2 = union_find.find(tail )
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head , tail , weight = cheap_edge[vertex]
                    if union_find.find(head ) != union_find.find(tail ):
                        union_find.union(head , tail )
                        mst_edges.append(cheap_edge[vertex] )
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges )
        return mst
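# Usage sketch (hypothetical data, for illustration only):
#   g = Graph.build(edges=[(0, 1, 1), (0, 2, 1), (2, 3, 1)])
#   g.distinct_weight()          # make edge weights unique first
#   mst = Graph.boruvka_mst(g)   # minimum spanning tree as a Graph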
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig''']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_rembert'''] = ['''RemBertTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_rembert_fast'''] = ['''RemBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_rembert'''] = [
'''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RemBertForCausalLM''',
'''RemBertForMaskedLM''',
'''RemBertForMultipleChoice''',
'''RemBertForQuestionAnswering''',
'''RemBertForSequenceClassification''',
'''RemBertForTokenClassification''',
'''RemBertLayer''',
'''RemBertModel''',
'''RemBertPreTrainedModel''',
'''load_tf_weights_in_rembert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = [
'''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRemBertForCausalLM''',
'''TFRemBertForMaskedLM''',
'''TFRemBertForMultipleChoice''',
'''TFRemBertForQuestionAnswering''',
'''TFRemBertForSequenceClassification''',
'''TFRemBertForTokenClassification''',
'''TFRemBertLayer''',
'''TFRemBertModel''',
'''TFRemBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    snake_case_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
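# A short, hedged illustration of what the lazy structure above buys you,
# assuming this file is a `transformers.models.rembert`-style __init__:
# importing the package stays cheap, and each heavy backend is imported only
# when one of its symbols is first accessed.
#
# from transformers.models.rembert import RemBertConfig  # config needs no backend
# config = RemBertConfig()
# from transformers.models.rembert import RemBertModel   # first torch-backed access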
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def simple_accuracy( preds ,labels):
    return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
''' --overwrite_output_dir to overcome.''')
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,datefmt='''%m/%d/%Y %H:%M:%S''' ,level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN ,)
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' ,training_args.local_rank ,training_args.device ,training_args.n_gpu ,bool(training_args.local_rank != -1) ,training_args.fp16 ,)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' ,a__)
# Set seed
set_seed(training_args.seed)
try:
_SCREAMING_SNAKE_CASE =processors[data_args.task_name]()
_SCREAMING_SNAKE_CASE =processor.get_labels()
_SCREAMING_SNAKE_CASE =len(a__)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path ,num_labels=a__ ,finetuning_task=data_args.task_name ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,)
_SCREAMING_SNAKE_CASE =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('''.ckpt''' in model_args.model_name_or_path) ,config=a__ ,cache_dir=model_args.cache_dir ,)
# Get datasets
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.train ,)
if training_args.do_train
else None
)
_SCREAMING_SNAKE_CASE =(
MultipleChoiceDataset(
data_dir=data_args.data_dir ,tokenizer=a__ ,task=data_args.task_name ,max_seq_length=data_args.max_seq_length ,overwrite_cache=data_args.overwrite_cache ,mode=Split.dev ,)
if training_args.do_eval
else None
)
def compute_metrics(a__) -> Dict:
_SCREAMING_SNAKE_CASE =np.argmax(p.predictions ,axis=1)
return {"acc": simple_accuracy(a__ ,p.label_ids)}
# Data collator
    _SCREAMING_SNAKE_CASE =DataCollatorWithPadding(a__ ,pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
_SCREAMING_SNAKE_CASE =Trainer(
model=a__ ,args=a__ ,train_dataset=a__ ,eval_dataset=a__ ,compute_metrics=a__ ,data_collator=a__ ,)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
_SCREAMING_SNAKE_CASE ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_SCREAMING_SNAKE_CASE =trainer.evaluate()
_SCREAMING_SNAKE_CASE =os.path.join(training_args.output_dir ,'''eval_results.txt''')
if trainer.is_world_master():
with open(a__ ,'''w''') as writer:
logger.info('''***** Eval results *****''')
for key, value in result.items():
logger.info(''' %s = %s''' ,a__ ,a__)
writer.write('''%s = %s\n''' % (key, value))
results.update(a__)
return results
def _mp_fn( index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
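# A hedged example invocation for the script above. The file name and data
# paths are illustrative; valid --task_name values come from processors.keys()
# in utils_multiple_choice.py, and the remaining flags are standard
# ModelArguments/TrainingArguments fields parsed by HfArgumentParser.
#
#   python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --max_seq_length 80 \
#     --output_dir ./swag_output \
#     --do_train --do_eval \
#     --per_device_train_batch_size 16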
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Dict = logging.get_logger(__name__)
def get_config( a__):
_SCREAMING_SNAKE_CASE ='''huggingface/label-files'''
_SCREAMING_SNAKE_CASE ='''imagenet-1k-id2label.json'''
_SCREAMING_SNAKE_CASE =json.load(open(hf_hub_download(a__ ,a__ ,repo_type='''dataset''') ,'''r'''))
_SCREAMING_SNAKE_CASE ={int(a__): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE ='''std_conv''' if '''bit''' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
    _SCREAMING_SNAKE_CASE =BitConfig(
        conv_layer=a__ ,num_labels=1000 ,id2label=idalabel ,label2id=labelaid ,)
return config
def rename_key( a__):
if "stem.conv" in name:
_SCREAMING_SNAKE_CASE =name.replace('''stem.conv''' ,'''bit.embedder.convolution''')
if "blocks" in name:
_SCREAMING_SNAKE_CASE =name.replace('''blocks''' ,'''layers''')
if "head.fc" in name:
_SCREAMING_SNAKE_CASE =name.replace('''head.fc''' ,'''classifier.1''')
if name.startswith('''norm'''):
_SCREAMING_SNAKE_CASE ='''bit.''' + name
if "bit" not in name and "classifier" not in name:
_SCREAMING_SNAKE_CASE ='''bit.encoder.''' + name
return name
def prepare_img( ):
_SCREAMING_SNAKE_CASE ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
_SCREAMING_SNAKE_CASE =Image.open(requests.get(a__ ,stream=a__).raw)
return im
@torch.no_grad()
def lowerCamelCase( a__ ,a__ ,a__=False):
_SCREAMING_SNAKE_CASE =get_config(a__)
# load original model from timm
_SCREAMING_SNAKE_CASE =create_model(a__ ,pretrained=a__)
timm_model.eval()
# load state_dict of original model
_SCREAMING_SNAKE_CASE =timm_model.state_dict()
for key in state_dict.copy().keys():
_SCREAMING_SNAKE_CASE =state_dict.pop(a__)
_SCREAMING_SNAKE_CASE =val.squeeze() if '''head''' in key else val
# load HuggingFace model
_SCREAMING_SNAKE_CASE =BitForImageClassification(a__)
model.eval()
model.load_state_dict(a__)
# create image processor
_SCREAMING_SNAKE_CASE =create_transform(**resolve_data_config({} ,model=a__))
_SCREAMING_SNAKE_CASE =transform.transforms
_SCREAMING_SNAKE_CASE ={
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
_SCREAMING_SNAKE_CASE =BitImageProcessor(
do_resize=a__ ,size={'''shortest_edge''': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=a__ ,crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} ,do_normalize=a__ ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,)
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =transform(a__).unsqueeze(0)
_SCREAMING_SNAKE_CASE =processor(a__ ,return_tensors='''pt''').pixel_values
# verify pixel values
assert torch.allclose(a__ ,a__)
# verify logits
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(a__)
_SCREAMING_SNAKE_CASE =outputs.logits
print('''Logits:''' ,logits[0, :3])
    print('''Predicted class:''' ,model.config.id2label[logits.argmax(-1).item()])
_SCREAMING_SNAKE_CASE =timm_model(a__)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(a__ ,outputs.logits ,atol=1e-3)
print('''Looks ok!''')
if pytorch_dump_folder_path is not None:
Path(a__).mkdir(exist_ok=a__)
print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
model.save_pretrained(a__)
processor.save_pretrained(a__)
if push_to_hub:
print(f"Pushing model {model_name} and processor to the hub")
model.push_to_hub(f"ybelkada/{model_name}")
processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
snake_case_ : Union[str, Any] = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
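# A hedged example invocation for the conversion script above (the script file
# name and dump path are illustrative; the model name is the argparse default):
#
#   python convert_bit_to_pytorch.py \
#     --model_name resnetv2_50x1_bitm \
#     --pytorch_dump_folder_path ./bit-50 \
#     --push_to_hub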
def binary_exponentiation( a ,n ,mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a ,n - 1 ,mod) * a) % mod
    else:
        b =binary_exponentiation(a ,n // 2 ,mod)
        return (b * b) % mod
# a prime number
p = 7_01
a = 10_00_00_00_00
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
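# Runnable sanity check: by Fermat's little theorem the modular inverse of b
# modulo the prime p is b**(p-2) mod p, which is what the prints above verify.
# Python's built-in three-argument pow computes the same quantity:
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)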
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = 42
class A__ ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCAmelCase = True
@register_to_config
def __init__( self : Tuple , _a : int = 3 , _a : int = 3 , _a : Tuple[str] = ("DownEncoderBlock2D",) , _a : Tuple[str] = ("UpDecoderBlock2D",) , _a : Tuple[int] = (64,) , _a : int = 1 , _a : str = "silu" , _a : int = 4 , _a : int = 32 , _a : int = 32 , _a : float = 0.1_82_15 , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
_SCREAMING_SNAKE_CASE =Encoder(
in_channels=_a , out_channels=_a , down_block_types=_a , block_out_channels=_a , layers_per_block=_a , act_fn=_a , norm_num_groups=_a , double_z=_a , )
# pass init params to Decoder
_SCREAMING_SNAKE_CASE =Decoder(
in_channels=_a , out_channels=_a , up_block_types=_a , block_out_channels=_a , layers_per_block=_a , norm_num_groups=_a , act_fn=_a , )
        _SCREAMING_SNAKE_CASE =nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
        _SCREAMING_SNAKE_CASE =nn.Conv2d(_a , _a , 1 )
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =False
# only relevant if vae tiling is enabled
_SCREAMING_SNAKE_CASE =self.config.sample_size
_SCREAMING_SNAKE_CASE =(
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
_SCREAMING_SNAKE_CASE =int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
_SCREAMING_SNAKE_CASE =0.25
def __UpperCamelCase ( self : Any , _a : Optional[Any] , _a : Any=False ) -> Union[str, Any]:
"""simple docstring"""
if isinstance(_a , (Encoder, Decoder) ):
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[int] , _a : bool = True ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =use_tiling
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
self.enable_tiling(_a )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =True
def __UpperCamelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def __UpperCamelCase ( self : Dict ) -> Dict[str, AttentionProcessor]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={}
def fn_recursive_add_processors(_a : str , _a : torch.nn.Module , _a : Dict[str, AttentionProcessor] ):
if hasattr(_a , '''set_processor''' ):
_SCREAMING_SNAKE_CASE =module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}" , _a , _a )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_a , _a , _a )
return processors
def __UpperCamelCase ( self : Optional[int] , _a : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =len(self.attn_processors.keys() )
if isinstance(_a , _a ) and len(_a ) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(_a )} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )
def fn_recursive_attn_processor(_a : str , _a : torch.nn.Module , _a : List[Any] ):
if hasattr(_a , '''set_processor''' ):
if not isinstance(_a , _a ):
module.set_processor(_a )
else:
module.set_processor(processor.pop(f"{name}.processor" ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}" , _a , _a )
for name, module in self.named_children():
fn_recursive_attn_processor(_a , _a , _a )
def __UpperCamelCase ( self : Any ) -> int:
"""simple docstring"""
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def __UpperCamelCase ( self : Tuple , _a : torch.FloatTensor , _a : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_a , return_dict=_a )
if self.use_slicing and x.shape[0] > 1:
_SCREAMING_SNAKE_CASE =[self.encoder(_a ) for x_slice in x.split(1 )]
_SCREAMING_SNAKE_CASE =torch.cat(_a )
else:
_SCREAMING_SNAKE_CASE =self.encoder(_a )
_SCREAMING_SNAKE_CASE =self.quant_conv(_a )
_SCREAMING_SNAKE_CASE =DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def __UpperCamelCase ( self : str , _a : torch.FloatTensor , _a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_a , return_dict=_a )
_SCREAMING_SNAKE_CASE =self.post_quant_conv(_a )
_SCREAMING_SNAKE_CASE =self.decoder(_a )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
@apply_forward_hook
def __UpperCamelCase ( self : str , _a : torch.FloatTensor , _a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if self.use_slicing and z.shape[0] > 1:
_SCREAMING_SNAKE_CASE =[self._decode(_a ).sample for z_slice in z.split(1 )]
_SCREAMING_SNAKE_CASE =torch.cat(_a )
else:
_SCREAMING_SNAKE_CASE =self._decode(_a ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_a )
def __UpperCamelCase ( self : List[Any] , _a : List[str] , _a : Any , _a : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =min(a.shape[2] , b.shape[2] , _a )
for y in range(_a ):
_SCREAMING_SNAKE_CASE =a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def __UpperCamelCase ( self : Union[str, Any] , _a : List[str] , _a : Optional[int] , _a : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =min(a.shape[3] , b.shape[3] , _a )
for x in range(_a ):
_SCREAMING_SNAKE_CASE =a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def __UpperCamelCase ( self : Tuple , _a : torch.FloatTensor , _a : bool = True ) -> AutoencoderKLOutput:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
_SCREAMING_SNAKE_CASE =int(self.tile_latent_min_size * self.tile_overlap_factor )
_SCREAMING_SNAKE_CASE =self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_SCREAMING_SNAKE_CASE =[]
for i in range(0 , x.shape[2] , _a ):
_SCREAMING_SNAKE_CASE =[]
for j in range(0 , x.shape[3] , _a ):
_SCREAMING_SNAKE_CASE =x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_SCREAMING_SNAKE_CASE =self.encoder(_a )
_SCREAMING_SNAKE_CASE =self.quant_conv(_a )
row.append(_a )
rows.append(_a )
_SCREAMING_SNAKE_CASE =[]
for i, row in enumerate(_a ):
_SCREAMING_SNAKE_CASE =[]
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_SCREAMING_SNAKE_CASE =self.blend_v(rows[i - 1][j] , _a , _a )
if j > 0:
_SCREAMING_SNAKE_CASE =self.blend_h(row[j - 1] , _a , _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a , dim=3 ) )
_SCREAMING_SNAKE_CASE =torch.cat(_a , dim=2 )
_SCREAMING_SNAKE_CASE =DiagonalGaussianDistribution(_a )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_a )
def __UpperCamelCase ( self : Optional[int] , _a : torch.FloatTensor , _a : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
_SCREAMING_SNAKE_CASE =int(self.tile_sample_min_size * self.tile_overlap_factor )
_SCREAMING_SNAKE_CASE =self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_SCREAMING_SNAKE_CASE =[]
for i in range(0 , z.shape[2] , _a ):
_SCREAMING_SNAKE_CASE =[]
for j in range(0 , z.shape[3] , _a ):
_SCREAMING_SNAKE_CASE =z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_SCREAMING_SNAKE_CASE =self.post_quant_conv(_a )
_SCREAMING_SNAKE_CASE =self.decoder(_a )
row.append(_a )
rows.append(_a )
_SCREAMING_SNAKE_CASE =[]
for i, row in enumerate(_a ):
_SCREAMING_SNAKE_CASE =[]
for j, tile in enumerate(_a ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_SCREAMING_SNAKE_CASE =self.blend_v(rows[i - 1][j] , _a , _a )
if j > 0:
_SCREAMING_SNAKE_CASE =self.blend_h(row[j - 1] , _a , _a )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_a , dim=3 ) )
_SCREAMING_SNAKE_CASE =torch.cat(_a , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_a )
def __UpperCamelCase ( self : Dict , _a : torch.FloatTensor , _a : bool = False , _a : bool = True , _a : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =sample
_SCREAMING_SNAKE_CASE =self.encode(_a ).latent_dist
if sample_posterior:
_SCREAMING_SNAKE_CASE =posterior.sample(generator=_a )
else:
_SCREAMING_SNAKE_CASE =posterior.mode()
_SCREAMING_SNAKE_CASE =self.decode(_a ).sample
if not return_dict:
return (dec,)
        return DecoderOutput(sample=_a )
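# A hedged round-trip sketch for the autoencoder above, assuming it mirrors
# diffusers' AutoencoderKL (class names in this file are anonymized); shapes
# follow from the default single-block config:
#
# import torch
# vae = AutoencoderKL()
# vae.enable_tiling()                       # large inputs go through the tiled paths
# x = torch.randn(1, 3, 64, 64)
# posterior = vae.encode(x).latent_dist     # DiagonalGaussianDistribution
# z = posterior.sample()
# recon = vae.decode(z).sample              # same spatial size as x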
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
                if isinstance(_a , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =BitModelTester(self )
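# Hedged note on running the suite above (the test file path is illustrative;
# the slow integration test downloads a BiT checkpoint, so it needs network
# access and the RUN_SLOW opt-in used across the transformers test suite):
#
#   RUN_SLOW=1 python -m pytest tests/models/bit/test_modeling_bit.py -k "logits"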
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args( ):
    parser =ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
))
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' ,type=a__ ,default=1 ,help='''Number of TPU cores to use (1 or 8).''')
# positional
parser.add_argument(
'''training_script''' ,type=a__ ,help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) ,)
# rest from the training program
parser.add_argument('''training_script_args''' ,nargs=a__)
return parser.parse_args()
def main( ):
    args =parse_args()
    # Import training_script as a module.
    script_fpath =Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name =script_fpath.stem
    mod =importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv =[args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores)]
    xmp.spawn(mod._mp_fn ,args=() ,nprocs=args.num_cores)
if __name__ == "__main__":
    main()
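# A hedged example invocation (the training script name is illustrative): the
# launcher appends --tpu_num_cores to the training script's own argv, so the
# launched script must accept that flag.
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --output_dir ./out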
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
        return output
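# A hedged composition sketch, assuming the class above mirrors transformers'
# RagConfig (class names in this file are anonymized). The classmethod defined
# above wraps a question-encoder config and a generator config into one object:
#
# from transformers import AutoConfig, RagConfig
# question_encoder = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
# generator = AutoConfig.from_pretrained("facebook/bart-large")
# rag_config = RagConfig.from_question_encoder_generator_configs(
#     question_encoder, generator, n_docs=5, index_name="compressed"
# )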
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
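# Hedged rendering note: saved as a scene file (file and class names in this
# snippet are anonymized, so both names below are illustrative), a typical
# Manim Community invocation would be:
#
#   manim -pql disk_offload_scene.py DiskOffloadAnimation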
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : List[str] = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "mobilenet_v1"
def __init__( self : Dict , _a : List[str]=3 , _a : Optional[Any]=224 , _a : Optional[int]=1.0 , _a : List[Any]=8 , _a : Any="relu6" , _a : Union[str, Any]=True , _a : List[str]=0.9_99 , _a : Any=0.02 , _a : List[Any]=0.0_01 , **_a : List[Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =depth_multiplier
_SCREAMING_SNAKE_CASE =min_depth
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =tf_padding
_SCREAMING_SNAKE_CASE =classifier_dropout_prob
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =layer_norm_eps
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = version.parse("1.11" )
@property
def __UpperCamelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def __UpperCamelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def __UpperCamelCase ( self : str ) -> float:
"""simple docstring"""
        return 1E-4
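# A hedged usage sketch, assuming the classes above mirror transformers'
# MobileNetV1Config and its ONNX config (class names in this file are
# anonymized):
#
# from transformers import MobileNetV1Config
# config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
# onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
# print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
# print(onnx_config.atol_for_validation)  # 1e-4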
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
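# A hedged sketch of the conversation path implemented above, assuming it
# mirrors transformers' BlenderbotTokenizerFast (class names in this file are
# anonymized):
#
# from transformers import BlenderbotTokenizerFast
# from transformers.pipelines.conversational import Conversation
# tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
# conv = Conversation("Hello, how are you?")
# ids = tok._build_conversation_input_ids(conv)  # user turns get a leading space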
from __future__ import annotations
def median_of_two_arrays( numsa ,numsb):
    all_numbers =sorted(numsa + numsb)
    div , mod =divmod(len(all_numbers) ,2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    array_a = [float(x) for x in input('''Enter the elements of first array: ''').split()]
    array_b = [float(x) for x in input('''Enter the elements of second array: ''').split()]
    print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_b)}""")
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 691 | 1 |
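
# A hedged usage sketch of the processor contract these tests pin down: one
# call routes `text` to the tokenizer and `images` to the image processor and
# merges both outputs into a single encoding (the checkpoint name below is
# illustrative, not taken from this file):
# from transformers import ChineseCLIPProcessor
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
# list(inputs.keys())  # -> ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values']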
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class A__ ( unittest.TestCase ):
def __init__( self : Tuple , _a : Dict , _a : Any=13 , _a : Dict=7 , _a : Union[str, Any]=True , _a : str=True , _a : Optional[Any]=True , _a : Any=True , _a : List[str]=99 , _a : Tuple=32 , _a : List[str]=5 , _a : Optional[int]=4 , _a : List[Any]=37 , _a : str="gelu" , _a : Tuple=0.1 , _a : List[Any]=0.1 , _a : Union[str, Any]=512 , _a : Dict=16 , _a : Dict=2 , _a : Tuple=0.02 , _a : Dict=4 , ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_attention_mask
_SCREAMING_SNAKE_CASE =use_token_type_ids
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =num_choices
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = True
UpperCAmelCase = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =FlaxBertModelTester(self )
@slow
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =FlaxBertModel.from_pretrained('''bert-base-cased''' )
_SCREAMING_SNAKE_CASE =model(np.ones((1, 1) ) )
self.assertIsNotNone(_a ) | 691 |
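
# A simplified, self-contained stand-in for the dummy inputs the tester above
# fabricates via ids_tensor/random_attention_mask (sizes taken from its
# defaults: batch_size=13, seq_length=7, vocab_size=99):
import numpy as np

rng = np.random.default_rng(0)
input_ids = rng.integers(0, 99, size=(13, 7))      # token ids in [0, vocab_size)
attention_mask = rng.integers(0, 2, size=(13, 7))  # 1 = attend, 0 = padding
token_type_ids = rng.integers(0, 2, size=(13, 7))  # segment ids in [0, type_vocab_size)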
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 691 | 1 |
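
# A minimal sketch of the output layout the loop above produces (pure path
# arithmetic; no files are written here):
from pathlib import Path

model_cards_dir = Path("model_cards")
for name in ("wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"):
    print(model_cards_dir / "allenai" / name / "README.md")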
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
snake_case_ : Optional[int] = logging.get_logger(__name__)
snake_case_ : Optional[int] = '''▁'''
snake_case_ : Optional[Any] = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
snake_case_ : List[Any] = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
snake_case_ : List[str] = {
'''facebook/s2t-small-librispeech-asr''': 10_24,
}
snake_case_ : str = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
snake_case_ : int = {'''mustc''': MUSTC_LANGS}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = MAX_MODEL_INPUT_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = []
def __init__( self : str , _a : Dict , _a : int , _a : Optional[Any]="<s>" , _a : str="</s>" , _a : List[str]="<pad>" , _a : List[str]="<unk>" , _a : Union[str, Any]=False , _a : Optional[Any]=False , _a : Any=None , _a : Union[str, Any]=None , _a : Optional[Dict[str, Any]] = None , **_a : Dict , ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , do_upper_case=_a , do_lower_case=_a , tgt_lang=_a , lang_codes=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_SCREAMING_SNAKE_CASE =do_upper_case
_SCREAMING_SNAKE_CASE =do_lower_case
_SCREAMING_SNAKE_CASE =load_json(_a )
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.encoder.items()}
_SCREAMING_SNAKE_CASE =spm_file
_SCREAMING_SNAKE_CASE =load_spm(_a , self.sp_model_kwargs )
if lang_codes is not None:
_SCREAMING_SNAKE_CASE =lang_codes
_SCREAMING_SNAKE_CASE =LANGUAGES[lang_codes]
_SCREAMING_SNAKE_CASE =[f"<lang:{lang}>" for lang in self.langs]
_SCREAMING_SNAKE_CASE ={lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
_SCREAMING_SNAKE_CASE =self.lang_tokens
_SCREAMING_SNAKE_CASE =tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_SCREAMING_SNAKE_CASE ={}
@property
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
return self._tgt_lang
@tgt_lang.setter
def __UpperCamelCase ( self : Optional[Any] , _a : int ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =new_tgt_lang
self.set_tgt_lang_special_tokens(_a )
def __UpperCamelCase ( self : Any , _a : str ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.lang_code_to_id[tgt_lang]
_SCREAMING_SNAKE_CASE =[lang_code_id]
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_a , out_type=_a )
def __UpperCamelCase ( self : Optional[int] , _a : List[Any] ) -> Any:
"""simple docstring"""
return self.encoder.get(_a , self.encoder[self.unk_token] )
def __UpperCamelCase ( self : List[str] , _a : int ) -> str:
"""simple docstring"""
return self.decoder.get(_a , self.unk_token )
def __UpperCamelCase ( self : List[Any] , _a : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_SCREAMING_SNAKE_CASE =self.sp_model.decode(_a )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_SCREAMING_SNAKE_CASE =[]
else:
current_sub_tokens.append(_a )
_SCREAMING_SNAKE_CASE =self.sp_model.decode(_a )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any] , _a : Tuple=None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Optional[int] , _a : List[int] , _a : Optional[List[int]] = None , _a : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
_SCREAMING_SNAKE_CASE =[1] * len(self.prefix_tokens )
_SCREAMING_SNAKE_CASE =[1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.__dict__.copy()
_SCREAMING_SNAKE_CASE =None
return state
def __setstate__( self : int , _a : Dict ) -> None:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self : Tuple , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Path(_a )
assert save_dir.is_dir(), f"{save_directory} should be a directory"
_SCREAMING_SNAKE_CASE =save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_SCREAMING_SNAKE_CASE =save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , _a )
if os.path.abspath(self.spm_file ) != os.path.abspath(_a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _a )
elif not os.path.isfile(self.spm_file ):
with open(_a , '''wb''' ) as fi:
_SCREAMING_SNAKE_CASE =self.sp_model.serialized_model_proto()
fi.write(_a )
return (str(_a ), str(_a ))
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =sentencepiece.SentencePieceProcessor(**a__)
spm.Load(str(a__))
return spm
def lowerCamelCase( a__):
with open(a__ ,'''r''') as f:
return json.load(a__)
def lowerCamelCase( a__ ,a__):
with open(a__ ,'''w''') as f:
json.dump(a__ ,a__ ,indent=2) | 691 |
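
# A hedged usage sketch of the target-language plumbing above: assigning
# tgt_lang swaps the single "<lang:xx>" prefix token that
# build_inputs_with_special_tokens prepends (the checkpoint name below is an
# assumption, not taken from this file):
# from transformers import Speech2TextTokenizer
# tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-medium-mustc-multilingual-st")
# tok.tgt_lang = "fr"                           # prefix_tokens -> [id of "<lang:fr>"]
# tok.build_inputs_with_special_tokens([5, 6])  # -> [<lang:fr> id, 5, 6, eos_token_id]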
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 691 | 1 |
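
# A behavioral sketch (not the real _LazyModule implementation) of what the
# lazy indirection above buys: nothing is imported at package import time, and
# the first attribute access triggers the real import.
import importlib

class _LazyDemo:
    def __init__(self, name: str) -> None:
        self._name = name
        self._mod = None

    def __getattr__(self, attr: str):
        if self._mod is None:
            self._mod = importlib.import_module(self._name)  # deferred until first use
        return getattr(self._mod, attr)

# _LazyDemo("json").dumps({"ok": True})  # the json module is imported only on this line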
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
snake_case_ : Dict = '''\
Text data.
Second line of data.'''
snake_case_ : Optional[int] = '''file'''
@pytest.fixture(scope='''session''')
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('''data''') / (FILE_PATH + '''.zstd''')
_SCREAMING_SNAKE_CASE =bytes(a__ ,'''utf-8''')
with zstd.open(a__ ,'''wb''') as f:
f.write(a__)
return path
@pytest.fixture
def lowerCamelCase( a__):
with open(os.path.join(tmpfs.local_root_dir ,a__) ,'''w''') as f:
f.write(a__)
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' ,['''gzip''', '''xz''', '''zstd'''])
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_SCREAMING_SNAKE_CASE =input_paths[compression_format]
_SCREAMING_SNAKE_CASE =tmp_path / '''cache'''
_SCREAMING_SNAKE_CASE =DownloadConfig(cache_dir=a__ ,extract_compressed_file=a__)
_SCREAMING_SNAKE_CASE =cached_path(a__ ,download_config=a__)
with open(a__) as f:
_SCREAMING_SNAKE_CASE =f.read()
with open(a__) as f:
_SCREAMING_SNAKE_CASE =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' ,[True, False])
@pytest.mark.parametrize('''default_cache_dir''' ,[True, False])
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ='''custom_cache'''
_SCREAMING_SNAKE_CASE ='''custom_extracted_dir'''
_SCREAMING_SNAKE_CASE =tmp_path / '''custom_extracted_path'''
if default_extracted:
_SCREAMING_SNAKE_CASE =('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' ,a__)
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' ,str(a__))
_SCREAMING_SNAKE_CASE =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_SCREAMING_SNAKE_CASE =xz_file
_SCREAMING_SNAKE_CASE =(
DownloadConfig(extract_compressed_file=a__)
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=a__)
)
_SCREAMING_SNAKE_CASE =cached_path(a__ ,download_config=a__)
assert Path(a__).parent.parts[-2:] == expected
def lowerCamelCase( a__):
# absolute path
_SCREAMING_SNAKE_CASE =str(Path(a__).resolve())
assert cached_path(a__) == text_file
# relative path
_SCREAMING_SNAKE_CASE =str(Path(a__).resolve().relative_to(Path(os.getcwd())))
assert cached_path(a__) == text_file
def lowerCamelCase( a__):
# absolute path
_SCREAMING_SNAKE_CASE =str(tmp_path.resolve() / '''__missing_file__.txt''')
with pytest.raises(a__):
cached_path(a__)
# relative path
_SCREAMING_SNAKE_CASE ='''./__missing_file__.txt'''
with pytest.raises(a__):
cached_path(a__)
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =get_from_cache(f"tmp://{tmpfs_file}")
with open(a__) as f:
_SCREAMING_SNAKE_CASE =f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,a__)
def lowerCamelCase( ):
with pytest.raises(a__):
cached_path('''https://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,a__)
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('''data''') / '''file.html'''
with pytest.raises(a__):
http_get('''https://huggingface.co''' ,temp_file=a__)
with pytest.raises(a__):
http_head('''https://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,a__)
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('''data''') / '''file.html'''
with pytest.raises(a__):
ftp_get('''ftp://huggingface.co''' ,temp_file=a__)
with pytest.raises(a__):
ftp_head('''ftp://huggingface.co''')
@patch('''datasets.config.HF_DATASETS_OFFLINE''' ,a__)
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =tmp_path_factory.mktemp('''data''') / '''file.html'''
with pytest.raises(a__):
fsspec_get('''s3://huggingface.co''' ,temp_file=a__)
with pytest.raises(a__):
fsspec_head('''s3://huggingface.co''') | 691 |
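
# A hedged sketch of the offline behavior these tests patch in: with
# datasets.config.HF_DATASETS_OFFLINE forced to True, any remote scheme raises
# OfflineModeIsEnabled while purely local paths still resolve:
# cached_path("./some_local_file.txt")   # fine: returned as a local path
# cached_path("https://huggingface.co")  # raises OfflineModeIsEnabled
# http_head("https://huggingface.co")    # raises OfflineModeIsEnabled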
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase( a__):
def wrapper(*a__ ,**a__):
_SCREAMING_SNAKE_CASE =timeit.default_timer()
_SCREAMING_SNAKE_CASE =func(*a__ ,**a__)
_SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime
return delta
_SCREAMING_SNAKE_CASE =func.__name__
return wrapper
def lowerCamelCase( a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
return dataset | 691 | 1 |
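
# A usage sketch for the timing decorator at the top of this file, shown with a
# hypothetical name: the decorated function returns the elapsed seconds instead
# of its own result, which is exactly what a throughput benchmark wants.
import time

def get_duration_demo(func):
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)               # the wrapped result is discarded on purpose
        return time.perf_counter() - start  # only the duration is returned
    wrapper.__name__ = func.__name__
    return wrapper

@get_duration_demo
def _spin(n: int) -> None:
    sum(range(n))

elapsed = _spin(1_000_000)  # a float number of seconds, not the sum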
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "The output directory where the model will be written."} , )
UpperCAmelCase = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} , )
UpperCAmelCase = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =HfArgumentParser((ModelArguments,))
((_SCREAMING_SNAKE_CASE) , ) =parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.encoder_config_name)
# Use pretrained encoder model's config
else:
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)
# Use explicit specified decoder config
if model_args.decoder_config_name:
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.decoder_config_name)
# Use pretrained decoder model's config
else:
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path ,decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path ,encoder_config=a__ ,decoder_config=a__ ,)
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_SCREAMING_SNAKE_CASE =decoder_config.decoder_start_token_id
_SCREAMING_SNAKE_CASE =decoder_config.pad_token_id
if decoder_start_token_id is None:
_SCREAMING_SNAKE_CASE =decoder_config.bos_token_id
if pad_token_id is None:
_SCREAMING_SNAKE_CASE =decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_SCREAMING_SNAKE_CASE =decoder_config.eos_token_id
_SCREAMING_SNAKE_CASE =decoder_start_token_id
_SCREAMING_SNAKE_CASE =pad_token_id
_SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
_SCREAMING_SNAKE_CASE =tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
model.save_pretrained(model_args.output_dir)
image_processor.save_pretrained(model_args.output_dir)
tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main() | 691 |
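
# A hedged CLI sketch for the script above (the script filename and checkpoint
# names are illustrative assumptions, not taken from this file):
# python create_flax_vision_encoder_decoder.py \
#   --output_dir ./image-captioning-warmstart \
#   --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#   --decoder_model_name_or_path gpt2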
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves, in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
return outputs | 691 | 1 |
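
# A minimal, framework-free sketch of the PABEE stopping rule implemented in
# the inference branch above (hypothetical helper name): inference exits once
# `patience` consecutive internal classifiers agree with the previous layer's
# prediction.
def pabee_exit_layer(per_layer_argmax: list, patience: int) -> int:
    """Return the 1-indexed layer at which PABEE would stop (or the last layer)."""
    streak, prev = 0, None
    for layer, pred in enumerate(per_layer_argmax, start=1):
        if prev is not None and pred == prev:
            streak += 1   # this layer agrees with the one before it
        else:
            streak = 0    # disagreement resets the patience counter
        prev = pred
        if streak == patience:
            return layer
    return len(per_layer_argmax)

# e.g. pabee_exit_layer([2, 0, 0, 0, 1], patience=2) == 4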
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def lowerCamelCase( a__ ,a__ ,a__):
if isinstance(a__ ,torch.Tensor):
return image
elif isinstance(a__ ,PIL.Image.Image):
_SCREAMING_SNAKE_CASE =[image]
if isinstance(image[0] ,PIL.Image.Image):
_SCREAMING_SNAKE_CASE =[np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
_SCREAMING_SNAKE_CASE =np.concatenate(a__ ,axis=0)
_SCREAMING_SNAKE_CASE =np.array(a__).astype(np.floataa) / 255.0
_SCREAMING_SNAKE_CASE =image.transpose(0 ,3 ,1 ,2)
_SCREAMING_SNAKE_CASE =2.0 * image - 1.0
_SCREAMING_SNAKE_CASE =torch.from_numpy(a__)
elif isinstance(image[0] ,torch.Tensor):
_SCREAMING_SNAKE_CASE =torch.cat(a__ ,dim=0)
return image
def lowerCamelCase( a__ ,a__ ,a__ ,a__=0.9995):
if not isinstance(a__ ,np.ndarray):
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =va.device
_SCREAMING_SNAKE_CASE =va.cpu().numpy()
_SCREAMING_SNAKE_CASE =va.cpu().numpy()
_SCREAMING_SNAKE_CASE =np.sum(va * va / (np.linalg.norm(a__) * np.linalg.norm(a__)))
if np.abs(a__) > DOT_THRESHOLD:
_SCREAMING_SNAKE_CASE =(1 - t) * va + t * va
else:
_SCREAMING_SNAKE_CASE =np.arccos(a__)
_SCREAMING_SNAKE_CASE =np.sin(a__)
_SCREAMING_SNAKE_CASE =theta_a * t
_SCREAMING_SNAKE_CASE =np.sin(a__)
_SCREAMING_SNAKE_CASE =np.sin(theta_a - theta_t) / sin_theta_a
_SCREAMING_SNAKE_CASE =sin_theta_t / sin_theta_a
_SCREAMING_SNAKE_CASE =sa * va + sa * va
if inputs_are_torch:
_SCREAMING_SNAKE_CASE =torch.from_numpy(a__).to(a__)
return va
def lowerCamelCase( a__ ,a__):
_SCREAMING_SNAKE_CASE =F.normalize(a__ ,dim=-1)
_SCREAMING_SNAKE_CASE =F.normalize(a__ ,dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def lowerCamelCase( a__ ,a__):
for param in model.parameters():
_SCREAMING_SNAKE_CASE =value
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : AutoencoderKL , _a : CLIPTextModel , _a : CLIPModel , _a : CLIPTokenizer , _a : UNetaDConditionModel , _a : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _a : CLIPFeatureExtractor , _a : Dict=None , _a : Optional[int]=None , _a : Any=None , ) -> List[str]:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_a , text_encoder=_a , clip_model=_a , tokenizer=_a , unet=_a , scheduler=_a , feature_extractor=_a , coca_model=_a , coca_tokenizer=_a , coca_transform=_a , )
_SCREAMING_SNAKE_CASE =(
feature_extractor.size
if isinstance(feature_extractor.size , _a )
else feature_extractor.size['''shortest_edge''']
)
_SCREAMING_SNAKE_CASE =transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _a )
set_requires_grad(self.clip_model , _a )
def __UpperCamelCase ( self : Optional[Any] , _a : Optional[Union[str, int]] = "auto" ) -> Dict:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_SCREAMING_SNAKE_CASE =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.enable_attention_slicing(_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
set_requires_grad(self.vae , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
set_requires_grad(self.vae , _a )
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
set_requires_grad(self.unet , _a )
def __UpperCamelCase ( self : Dict ) -> List[Any]:
"""simple docstring"""
set_requires_grad(self.unet , _a )
def __UpperCamelCase ( self : Tuple , _a : List[Any] , _a : Tuple , _a : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =min(int(num_inference_steps * strength ) , _a )
_SCREAMING_SNAKE_CASE =max(num_inference_steps - init_timestep , 0 )
_SCREAMING_SNAKE_CASE =self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCamelCase ( self : int , _a : int , _a : Any , _a : Optional[int] , _a : List[str] , _a : List[str] , _a : Union[str, Any]=None ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_a , torch.Tensor ):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(_a )}" )
_SCREAMING_SNAKE_CASE =image.to(device=_a , dtype=_a )
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =[
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_a )
]
_SCREAMING_SNAKE_CASE =torch.cat(_a , dim=0 )
else:
_SCREAMING_SNAKE_CASE =self.vae.encode(_a ).latent_dist.sample(_a )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_SCREAMING_SNAKE_CASE =0.1_82_15 * init_latents
_SCREAMING_SNAKE_CASE =init_latents.repeat_interleave(_a , dim=0 )
_SCREAMING_SNAKE_CASE =randn_tensor(init_latents.shape , generator=_a , device=_a , dtype=_a )
# get latents
_SCREAMING_SNAKE_CASE =self.scheduler.add_noise(_a , _a , _a )
_SCREAMING_SNAKE_CASE =init_latents
return latents
def __UpperCamelCase ( self : Tuple , _a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.coca_transform(_a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_SCREAMING_SNAKE_CASE =self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_SCREAMING_SNAKE_CASE =self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def __UpperCamelCase ( self : Tuple , _a : Union[str, Any] , _a : Optional[int] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.feature_extractor.preprocess(_a )
_SCREAMING_SNAKE_CASE =torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_SCREAMING_SNAKE_CASE =self.clip_model.get_image_features(_a )
_SCREAMING_SNAKE_CASE =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
_SCREAMING_SNAKE_CASE =image_embeddings_clip.repeat_interleave(_a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCamelCase ( self : Optional[int] , _a : int , _a : Any , _a : Optional[Any] , _a : Optional[int] , _a : str , _a : Optional[Any] , _a : Dict , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =latents.detach().requires_grad_()
_SCREAMING_SNAKE_CASE =self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
_SCREAMING_SNAKE_CASE =self.unet(_a , _a , encoder_hidden_states=_a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_SCREAMING_SNAKE_CASE =self.scheduler.alphas_cumprod[timestep]
_SCREAMING_SNAKE_CASE =1 - alpha_prod_t
            # compute the predicted original sample from the predicted noise, also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_SCREAMING_SNAKE_CASE =(latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_SCREAMING_SNAKE_CASE =torch.sqrt(_a )
_SCREAMING_SNAKE_CASE =pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _a ):
_SCREAMING_SNAKE_CASE =self.scheduler.sigmas[index]
_SCREAMING_SNAKE_CASE =latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler )} not supported" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_SCREAMING_SNAKE_CASE =1 / 0.1_82_15 * sample
_SCREAMING_SNAKE_CASE =self.vae.decode(_a ).sample
_SCREAMING_SNAKE_CASE =(image / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE =transforms.Resize(self.feature_extractor_size )(_a )
_SCREAMING_SNAKE_CASE =self.normalize(_a ).to(latents.dtype )
_SCREAMING_SNAKE_CASE =self.clip_model.get_image_features(_a )
_SCREAMING_SNAKE_CASE =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_a )
_SCREAMING_SNAKE_CASE =spherical_dist_loss(_a , _a ).mean() * clip_guidance_scale
_SCREAMING_SNAKE_CASE =-torch.autograd.grad(_a , _a )[0]
if isinstance(self.scheduler , _a ):
_SCREAMING_SNAKE_CASE =latents.detach() + grads * (sigma**2)
_SCREAMING_SNAKE_CASE =noise_pred_original
else:
_SCREAMING_SNAKE_CASE =noise_pred_original - torch.sqrt(_a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : List[Any] , _a : Union[torch.FloatTensor, PIL.Image.Image] , _a : Union[torch.FloatTensor, PIL.Image.Image] , _a : Optional[str] = None , _a : Optional[str] = None , _a : Optional[int] = 512 , _a : Optional[int] = 512 , _a : float = 0.6 , _a : Optional[int] = 50 , _a : Optional[float] = 7.5 , _a : Optional[int] = 1 , _a : float = 0.0 , _a : Optional[float] = 100 , _a : Optional[torch.Generator] = None , _a : Optional[str] = "pil" , _a : bool = True , _a : float = 0.8 , _a : float = 0.1 , _a : float = 0.1 , ) -> Optional[Any]:
"""simple docstring"""
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(_a )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(_a , torch.Generator ) and batch_size > 1:
_SCREAMING_SNAKE_CASE =[generator] + [None] * (batch_size - 1)
_SCREAMING_SNAKE_CASE =[
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_SCREAMING_SNAKE_CASE =[x[0] for x in coca_is_none if x[1]]
_SCREAMING_SNAKE_CASE =''', '''.join(_a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_a ):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
_SCREAMING_SNAKE_CASE =self.get_image_description(_a )
if style_prompt is None:
if len(_a ):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
_SCREAMING_SNAKE_CASE =self.get_image_description(_a )
# get prompt text embeddings for content and style
_SCREAMING_SNAKE_CASE =self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE =self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_SCREAMING_SNAKE_CASE =self.tokenizer(
_a , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE =self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_SCREAMING_SNAKE_CASE =slerp(_a , _a , _a )
# duplicate text embeddings for each generation per prompt
_SCREAMING_SNAKE_CASE =text_embeddings.repeat_interleave(_a , dim=0 )
# set timesteps
_SCREAMING_SNAKE_CASE ='''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_SCREAMING_SNAKE_CASE ={}
if accepts_offset:
_SCREAMING_SNAKE_CASE =1
self.scheduler.set_timesteps(_a , **_a )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.get_timesteps(_a , _a , self.device )
_SCREAMING_SNAKE_CASE =timesteps[:1].repeat(_a )
# Preprocess image
_SCREAMING_SNAKE_CASE =preprocess(_a , _a , _a )
_SCREAMING_SNAKE_CASE =self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
_SCREAMING_SNAKE_CASE =preprocess(_a , _a , _a )
_SCREAMING_SNAKE_CASE =self.prepare_latents(
_a , _a , _a , text_embeddings.dtype , self.device , _a )
_SCREAMING_SNAKE_CASE =slerp(_a , _a , _a )
if clip_guidance_scale > 0:
_SCREAMING_SNAKE_CASE =self.get_clip_image_embeddings(_a , _a )
_SCREAMING_SNAKE_CASE =self.get_clip_image_embeddings(_a , _a )
_SCREAMING_SNAKE_CASE =slerp(
_a , _a , _a )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_SCREAMING_SNAKE_CASE =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE =content_text_input.input_ids.shape[-1]
_SCREAMING_SNAKE_CASE =self.tokenizer([''''''] , padding='''max_length''' , max_length=_a , return_tensors='''pt''' )
_SCREAMING_SNAKE_CASE =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_SCREAMING_SNAKE_CASE =uncond_embeddings.repeat_interleave(_a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_SCREAMING_SNAKE_CASE =torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
# However this currently doesn't work in `mps`.
_SCREAMING_SNAKE_CASE =(batch_size, self.unet.config.in_channels, height // 8, width // 8)
_SCREAMING_SNAKE_CASE =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_SCREAMING_SNAKE_CASE =torch.randn(_a , generator=_a , device='''cpu''' , dtype=_a ).to(
self.device )
else:
_SCREAMING_SNAKE_CASE =torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
_SCREAMING_SNAKE_CASE =latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_SCREAMING_SNAKE_CASE =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_SCREAMING_SNAKE_CASE ='''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_SCREAMING_SNAKE_CASE ={}
if accepts_eta:
_SCREAMING_SNAKE_CASE =eta
# check if the scheduler accepts generator
_SCREAMING_SNAKE_CASE ='''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_SCREAMING_SNAKE_CASE =generator
with self.progress_bar(total=_a ):
for i, t in enumerate(_a ):
# expand the latents if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_SCREAMING_SNAKE_CASE =self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
_SCREAMING_SNAKE_CASE =self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =noise_pred.chunk(2 )
_SCREAMING_SNAKE_CASE =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_SCREAMING_SNAKE_CASE =(
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.cond_fn(
_a , _a , _a , _a , _a , _a , _a , )
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE =self.scheduler.step(_a , _a , _a , **_a ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_SCREAMING_SNAKE_CASE =1 / 0.1_82_15 * latents
_SCREAMING_SNAKE_CASE =self.vae.decode(_a ).sample
_SCREAMING_SNAKE_CASE =(image / 2 + 0.5).clamp(0 , 1 )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE =self.numpy_to_pil(_a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a ) | 691 |
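
# A self-contained NumPy sketch of the spherical interpolation (slerp) the
# pipeline above uses to blend the content/style text embeddings, image
# latents, and CLIP image embeddings:
import numpy as np

def slerp_np(t: float, va: np.ndarray, vb: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    dot = np.sum(va * vb) / (np.linalg.norm(va) * np.linalg.norm(vb))
    if np.abs(dot) > dot_threshold:  # nearly (anti-)parallel: fall back to plain lerp
        return (1 - t) * va + t * vb
    theta = np.arccos(dot)           # angle between the two vectors
    return (np.sin((1 - t) * theta) * va + np.sin(t * theta) * vb) / np.sin(theta)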
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case_ : str = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
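# Illustrative usage (checkpoint name assumed): the lazy registration above defers the heavy
# torch-backed import until first attribute access, e.g.
#   from transformers import TableTransformerForObjectDetection
#   model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")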
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        '''en''': '''Machine learning is great, isn\'t it?''',
        '''ru''': '''Машинное обучение - это здорово, не так ли?''',
        '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
    }
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        '''wmt16-en-de-dist-12-1''': [28.3, 27.52],
        '''wmt16-en-de-dist-6-1''': [27.4, 27.11],
        '''wmt16-en-de-12-1''': [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
    model_card_dir.mkdir(parents=True ,exist_ok=True)
    path = os.path.join(model_card_dir ,'''README.md''')
    print(f"Generating {path}")
    with open(path ,'''w''' ,encoding='''utf-8''') as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / '''allenai''' / model_name
write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name) | 691 |
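# Resulting layout, derived from the loop above:
#   model_cards/allenai/wmt16-en-de-dist-12-1/README.md
#   model_cards/allenai/wmt16-en-de-dist-6-1/README.md
#   model_cards/allenai/wmt16-en-de-12-1/README.md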
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] ) | 691 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class A__ ( UpperCamelCase__ ):
def __init__( self : List[Any] , _a : Optional[Any] , _a : Any=13 , _a : Any=7 , _a : Union[str, Any]=True , _a : List[str]=True , _a : List[Any]=False , _a : str=True , _a : str=99 , _a : List[Any]=32 , _a : List[Any]=5 , _a : List[Any]=4 , _a : Tuple=37 , _a : Any="gelu" , _a : Dict=0.1 , _a : int=0.1 , _a : str=512 , _a : str=16 , _a : Any=2 , _a : int=0.02 , _a : int=3 , _a : Optional[int]=4 , _a : int=None , ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_input_mask
_SCREAMING_SNAKE_CASE =use_token_type_ids
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =num_choices
_SCREAMING_SNAKE_CASE =scope
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : int ) -> int:
"""simple docstring"""
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Tuple , _a : Optional[Any] , _a : Dict , _a : Any , _a : List[Any] , _a : Optional[Any] , _a : Tuple ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DistilBertModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , _a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : List[str] , _a : List[Any] , _a : Any , _a : str , _a : List[Any] , _a : List[Any] , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DistilBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Dict , _a : str , _a : Tuple , _a : List[Any] , _a : List[str] , _a : Optional[Any] , _a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DistilBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(
_a , attention_mask=_a , start_positions=_a , end_positions=_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Tuple , _a : Dict , _a : Union[str, Any] , _a : Tuple , _a : List[Any] , _a : Dict , _a : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =DistilBertForSequenceClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : int , _a : Any , _a : List[str] , _a : List[Any] , _a : Optional[Any] , _a : int , _a : List[str] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =DistilBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : List[Any] , _a : Any , _a : Any , _a : Optional[int] , _a : List[Any] , _a : Dict , _a : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_choices
_SCREAMING_SNAKE_CASE =DistilBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE =model(
_a , attention_mask=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCAmelCase = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DistilBertModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , dim=37 )
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*_a )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_a )
def __UpperCamelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_a )
def __UpperCamelCase ( self : str ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_a )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =DistilBertModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =model_class(config=_a )
_SCREAMING_SNAKE_CASE =self._prepare_for_class(_a , _a )
_SCREAMING_SNAKE_CASE =torch.jit.trace(
_a , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_a , os.path.join(_a , '''traced_model.pt''' ) )
_SCREAMING_SNAKE_CASE =torch.jit.load(os.path.join(_a , '''traced_model.pt''' ) , map_location=_a )
loaded(inputs_dict['''input_ids'''].to(_a ) , inputs_dict['''attention_mask'''].to(_a ) )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
_SCREAMING_SNAKE_CASE =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_SCREAMING_SNAKE_CASE =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )[0]
_SCREAMING_SNAKE_CASE =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) ) | 691 |
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text ,'''html.parser''')
    keys = soup.findAll('''h1''')
    values = soup.findAll('''div''' ,{'''class''': '''maincounter-number'''})
    keys += soup.findAll('''span''' ,{'''class''': '''panel-title'''})
    values += soup.findAll('''div''' ,{'''class''': '''number-table-main'''})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys ,values)}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
    for key, value in world_covid19_stats().items():
print(f"""{key}\n{value}\n""") | 691 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "encoder-decoder"
UpperCAmelCase = True
def __init__( self : Tuple , **_a : int ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**_a )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''encoder''' )
_SCREAMING_SNAKE_CASE =encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''decoder''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =True
@classmethod
def __UpperCamelCase ( cls : List[str] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : List[Any] ) -> PretrainedConfig:
"""simple docstring"""
logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_a )
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.decoder.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
return output | 691 |
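# Usage sketch (illustrative; both sub-configs assumed importable from `transformers`):
#   from transformers import BertConfig, EncoderDecoderConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention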
def set_bit(number: int, position: int) -> int:
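    """Return `number` with the bit at `position` set to 1.

    >>> set_bit(0b1101, 1)  # 0b1111
    15
    >>> set_bit(0b0, 5)  # 0b100000
    32
    """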
return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
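    """Return `number` with the bit at `position` cleared to 0.

    >>> clear_bit(0b1111, 1)  # 0b1101
    13
    >>> clear_bit(0b1101, 1)  # already clear
    13
    """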
return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
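    """Return `number` with the bit at `position` inverted.

    >>> flip_bit(0b1101, 1)  # 0b1111
    15
    >>> flip_bit(0b1111, 1)  # 0b1101
    13
    """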
return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
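    """Return True if the bit at `position` of `number` is 1.

    >>> is_bit_set(0b1010, 3)
    True
    >>> is_bit_set(0b1010, 0)
    False
    """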
return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
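    """Return the bit (0 or 1) at `position` of `number`.

    >>> get_bit(0b1010, 1)
    1
    >>> get_bit(0b1010, 2)
    0
    """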
return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod() | 691 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class A__ ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def __UpperCamelCase ( self : List[Any] , _a : str ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a , config_name=_a )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_pretrained(_a , config_name=_a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , _a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , _a )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained('''gpt2''' )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_model_config(_a )
_SCREAMING_SNAKE_CASE =GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(_a , _a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GenerationConfig()
_SCREAMING_SNAKE_CASE ={
'''max_new_tokens''': 1024,
'''foo''': '''bar''',
}
_SCREAMING_SNAKE_CASE =copy.deepcopy(_a )
_SCREAMING_SNAKE_CASE =generation_config.update(**_a )
# update_kwargs was not modified (no side effects)
self.assertEqual(_a , _a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_a , {'''foo''': '''bar'''} )
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GenerationConfig()
_SCREAMING_SNAKE_CASE ='''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_pretrained(_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_model_config(_a )
assert not hasattr(_a , '''foo''' ) # no new kwargs should be initialized if from config
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , _a )
self.assertEqual(default_config.num_beams , 1 )
_SCREAMING_SNAKE_CASE =GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , _a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_pretrained(_a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , _a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls : Tuple ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCamelCase ( cls : str ) -> List[str]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id='''test-generation-config''' , push_to_hub=_a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_pretrained(f"{USER}/test-generation-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GenerationConfig(
do_sample=_a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=_a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) ) | 691 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =8
# DPR tok
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
_SCREAMING_SNAKE_CASE =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
_SCREAMING_SNAKE_CASE ={'''unk_token''': '''<unk>'''}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_a , exist_ok=_a )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
_SCREAMING_SNAKE_CASE =os.path.join(_a , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_a ) )
def __UpperCamelCase ( self : List[str] ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Dict ) -> DPRContextEncoderTokenizer:
"""simple docstring"""
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
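        # Illustrative note: with METRIC_INNER_PRODUCT the retrieval score is a plain dot
        # product, so the all-twos row (id "1") is the top hit for an all-ones query and the
        # all-ones row (id "0") is the top hit for its negation -- the assertions below rely on this.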
return dataset
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def __UpperCamelCase ( self : Optional[int] , _a : bool ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''dataset''' )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _a ) , )
return retriever
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
_SCREAMING_SNAKE_CASE ={sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_a , open(_a , '''wb''' ) )
_SCREAMING_SNAKE_CASE =RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
_SCREAMING_SNAKE_CASE =RagRetriever(
_a , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
_SCREAMING_SNAKE_CASE =self.get_dummy_dataset()
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _a )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=_a )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_a ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _a )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =RagRetriever.from_pretrained(_a )
self.assertIsInstance(_a , _a )
_SCREAMING_SNAKE_CASE =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever.retrieve(_a , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_canonical_hf_index_retriever()
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , _a )
self.assertIsInstance(_a , np.ndarray )
_SCREAMING_SNAKE_CASE =retriever(
_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a , return_tensors='''pt''' , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
self.assertIsInstance(_a , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self : str ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_dpr_ctx_encoder_tokenizer()
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self.get_dummy_custom_hf_index_retriever(from_disk=_a )
retriever.set_ctx_encoder_tokenizer(_a )
_SCREAMING_SNAKE_CASE =[[5, 7], [10, 11]]
_SCREAMING_SNAKE_CASE =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_SCREAMING_SNAKE_CASE =retriever(_a , _a , prefix=retriever.config.generator.prefix , n_docs=_a )
self.assertEqual(
            len(_a ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _a ) # check for doc token related keys in dictionary. | 691 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # otherwise the model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_SCREAMING_SNAKE_CASE ={os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1 | 691 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase__ , unittest.TestCase ):
    UpperCAmelCase = KandinskyImg2ImgPipeline
UpperCAmelCase = ["prompt", "image_embeds", "negative_image_embeds", "image"]
UpperCAmelCase = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
UpperCAmelCase = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase = False
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __UpperCamelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_SCREAMING_SNAKE_CASE =MultilingualCLIP(_a )
_SCREAMING_SNAKE_CASE =text_encoder.eval()
return text_encoder
@property
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE ={
'''in_channels''': 4,
            # out_channels is double in_channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_SCREAMING_SNAKE_CASE =UNetaDConditionModel(**_a )
return model
@property
def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
_SCREAMING_SNAKE_CASE =VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dummy_text_encoder
_SCREAMING_SNAKE_CASE =self.dummy_tokenizer
_SCREAMING_SNAKE_CASE =self.dummy_unet
_SCREAMING_SNAKE_CASE =self.dummy_movq
_SCREAMING_SNAKE_CASE ={
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
_SCREAMING_SNAKE_CASE =DDIMScheduler(**_a )
_SCREAMING_SNAKE_CASE ={
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __UpperCamelCase ( self : str , _a : int , _a : int=0 ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_SCREAMING_SNAKE_CASE =floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
_SCREAMING_SNAKE_CASE =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
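        # Note: device-local generators are unsupported on MPS, so the branch below
        # falls back to a CPU-seeded generator (a common pattern in diffusers tests).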
if str(_a ).startswith('''mps''' ):
_SCREAMING_SNAKE_CASE =torch.manual_seed(_a )
else:
_SCREAMING_SNAKE_CASE =torch.Generator(device=_a ).manual_seed(_a )
_SCREAMING_SNAKE_CASE ={
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def __UpperCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''cpu'''
_SCREAMING_SNAKE_CASE =self.get_dummy_components()
_SCREAMING_SNAKE_CASE =self.pipeline_class(**_a )
_SCREAMING_SNAKE_CASE =pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =pipe(**self.get_dummy_inputs(_a ) )
_SCREAMING_SNAKE_CASE =output.images
_SCREAMING_SNAKE_CASE =pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
_SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_SCREAMING_SNAKE_CASE =np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
_SCREAMING_SNAKE_CASE =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_SCREAMING_SNAKE_CASE ='''A red cartoon frog, 4k'''
_SCREAMING_SNAKE_CASE =KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_SCREAMING_SNAKE_CASE =KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
_SCREAMING_SNAKE_CASE =pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_SCREAMING_SNAKE_CASE =torch.Generator(device='''cpu''' ).manual_seed(0 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_SCREAMING_SNAKE_CASE =pipeline(
_a , image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
_SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_a , _a )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
snake_case_ : List[Any] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
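# The block above follows the transformers lazy-import pattern: heavy submodule
# imports are deferred until an attribute is first accessed, keeping package
# import cheap. A minimal sketch of the same idea (`_LazyAttr` is illustrative
# only; it is not transformers' actual `_LazyModule` API):
import importlib


class _LazyAttr:
    def __init__(self, module_name: str):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name: str):
        # Import the real module only on first attribute access.
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)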
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
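    # Depending on the flags, the helper above returns PIL images (the default),
    # NumPy arrays (numpify=True), or torch tensors (torchify=True); arrays are
    # built channels-first, hence the np.moveaxis before Image.fromarray.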
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
        ) , )
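# Why the 4-channel tester above still expects 3 output channels: with
# do_convert_rgb=True the processor converts RGBA inputs to RGB before any other
# transform. A minimal standalone illustration of that conversion (a sketch of
# the underlying PIL behaviour, not the processor's internal code):
import numpy as np
from PIL import Image

rgba_image = Image.fromarray(np.zeros((18, 18, 4), dtype=np.uint8), mode="RGBA")
assert np.asarray(rgba_image.convert("RGB")).shape == (18, 18, 3)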
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
snake_case_ : Optional[Any] = logging.get_logger('''transformers.models.speecht5''')
snake_case_ : int = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
snake_case_ : Any = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
snake_case_ : Optional[int] = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
snake_case_ : Tuple = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
snake_case_ : Optional[int] = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
snake_case_ : int = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
snake_case_ : Dict = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
snake_case_ : int = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
snake_case_ : List[str] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
snake_case_ : List[str] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
snake_case_ : int = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
snake_case_ : int = []
snake_case_ : Union[str, Any] = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
snake_case_ : Tuple = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
snake_case_ : Any = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
snake_case_ : Optional[int] = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
for attribute in key.split('''.'''):
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__)
if weight_type is not None:
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__).shape
else:
_SCREAMING_SNAKE_CASE =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}")
if weight_type == "weight":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_g":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_v":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "running_mean":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "running_var":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "num_batches_tracked":
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def lowerCamelCase( a__ ,a__):
for key in ignore_keys:
if key.endswith('''.*'''):
if name.startswith(key[:-1]):
return True
elif ".*." in key:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =key.split('''.*.''')
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
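# Illustrative matches for the wildcard rules implemented above (the names are
# hypothetical): a trailing ".*" matches by prefix, an embedded ".*." requires
# both halves to occur in the name, and any other key matches as a substring:
#   name="text_encoder_prenet.foo",      key="text_encoder_prenet.*"        -> ignored
#   name="encoder.layers.3.norm_k.bias", key="encoder.layers.*.norm_k.bias" -> ignored
#   name="encoder.version",              key="encoder.version"              -> ignored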
def lowerCamelCase( a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =[]
if task == "s2t":
_SCREAMING_SNAKE_CASE =hf_model.speechta.encoder.prenet.feature_encoder
_SCREAMING_SNAKE_CASE =MAPPING_S2T
_SCREAMING_SNAKE_CASE =IGNORE_KEYS_S2T
elif task == "t2s":
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =MAPPING_T2S
_SCREAMING_SNAKE_CASE =IGNORE_KEYS_T2S
elif task == "s2s":
_SCREAMING_SNAKE_CASE =hf_model.speechta.encoder.prenet.feature_encoder
_SCREAMING_SNAKE_CASE =MAPPING_S2S
_SCREAMING_SNAKE_CASE =IGNORE_KEYS_S2S
else:
raise ValueError(f"Unsupported task: {task}")
for name, value in fairseq_dict.items():
if should_ignore(a__ ,a__):
logger.info(f"{name} was ignored")
continue
_SCREAMING_SNAKE_CASE =False
if "conv_layers" in name:
load_conv_layer(
a__ ,a__ ,a__ ,a__ ,hf_model.config.feat_extract_norm == '''group''' ,)
_SCREAMING_SNAKE_CASE =True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =key.split('''.*.''')
if prefix in name and suffix in name:
_SCREAMING_SNAKE_CASE =suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_SCREAMING_SNAKE_CASE =True
if "*" in mapped_key:
_SCREAMING_SNAKE_CASE =name.split(a__)[0].split('''.''')[-2]
_SCREAMING_SNAKE_CASE =mapped_key.replace('''*''' ,a__)
if "weight_g" in name:
_SCREAMING_SNAKE_CASE ='''weight_g'''
elif "weight_v" in name:
_SCREAMING_SNAKE_CASE ='''weight_v'''
elif "bias" in name:
_SCREAMING_SNAKE_CASE ='''bias'''
elif "weight" in name:
_SCREAMING_SNAKE_CASE ='''weight'''
elif "running_mean" in name:
_SCREAMING_SNAKE_CASE ='''running_mean'''
elif "running_var" in name:
_SCREAMING_SNAKE_CASE ='''running_var'''
elif "num_batches_tracked" in name:
_SCREAMING_SNAKE_CASE ='''num_batches_tracked'''
else:
_SCREAMING_SNAKE_CASE =None
set_recursively(a__ ,a__ ,a__ ,a__ ,a__)
continue
if not is_used:
unused_weights.append(a__)
logger.warning(f"Unused weights: {unused_weights}")
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =full_name.split('''conv_layers.''')[-1]
_SCREAMING_SNAKE_CASE =name.split('''.''')
_SCREAMING_SNAKE_CASE =int(items[0])
_SCREAMING_SNAKE_CASE =int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(a__)
@torch.no_grad()
def lowerCamelCase( a__ ,a__ ,a__ ,a__=None ,a__=None ,a__=None ,):
if config_path is not None:
_SCREAMING_SNAKE_CASE =SpeechTaConfig.from_pretrained(a__)
else:
_SCREAMING_SNAKE_CASE =SpeechTaConfig()
if task == "s2t":
_SCREAMING_SNAKE_CASE =config.max_text_positions
_SCREAMING_SNAKE_CASE =SpeechTaForSpeechToText(a__)
elif task == "t2s":
_SCREAMING_SNAKE_CASE =1876
_SCREAMING_SNAKE_CASE =600
_SCREAMING_SNAKE_CASE =config.max_speech_positions
_SCREAMING_SNAKE_CASE =SpeechTaForTextToSpeech(a__)
elif task == "s2s":
_SCREAMING_SNAKE_CASE =1876
_SCREAMING_SNAKE_CASE =config.max_speech_positions
_SCREAMING_SNAKE_CASE =SpeechTaForSpeechToSpeech(a__)
else:
raise ValueError(f"Unknown task name: {task}")
if vocab_path:
_SCREAMING_SNAKE_CASE =SpeechTaTokenizer(a__ ,model_max_length=config.max_text_positions)
# Mask token behaves like a normal word, i.e. include the space before it
_SCREAMING_SNAKE_CASE =AddedToken('''<mask>''' ,lstrip=a__ ,rstrip=a__)
_SCREAMING_SNAKE_CASE =mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token})
tokenizer.add_tokens(['''<ctc_blank>'''])
_SCREAMING_SNAKE_CASE =SpeechTaFeatureExtractor()
_SCREAMING_SNAKE_CASE =SpeechTaProcessor(tokenizer=a__ ,feature_extractor=a__)
processor.save_pretrained(a__)
_SCREAMING_SNAKE_CASE =torch.load(a__)
recursively_load_weights(fairseq_checkpoint['''model'''] ,a__ ,a__)
model.save_pretrained(a__)
if repo_id:
print('''Pushing to the hub...''')
processor.push_to_hub(a__)
model.push_to_hub(a__)
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
    )
def and_gate(input_1: int, input_2: int) -> int:
    """
    Compute the logical AND of two binary inputs (0 or 1).

    >>> and_gate(0, 1)
    0
    >>> and_gate(1, 1)
    1
    """
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
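# Aside (not part of the original file): for 0/1 inputs, the tuple-count trick in
# and_gate is equivalent to int(bool(input_1) and bool(input_2)).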
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('''Please enter a valid equation.''')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('''Both a & b of two equations can\'t be zero.''')
    # Extract the coefficients (each equation is [a, b, c] for a*x + b*y = c)
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('''Infinite solutions. (Consistent system)''')
        else:
            raise ValueError('''No solution. (Inconsistent system)''')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
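# A short self-check for the repaired function above; the expected values follow
# directly from the determinant formulas (equations encoded as [a, b, c] for
# a*x + b*y = c):
if __name__ == "__main__":
    assert cramers_rule_2x2([2, 3, 0], [5, 1, 0]) == (0.0, 0.0)  # trivial solution
    assert cramers_rule_2x2([0, 4, 50], [2, 0, 26]) == (13.0, 12.5)  # 2x = 26, 4y = 50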
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
snake_case_ : Optional[int] = '''sshleifer/mar_enro_6_3_student'''
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Any ) -> Any:
"""simple docstring"""
super().setUp()
_SCREAMING_SNAKE_CASE =cached_path(
'''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=_a , )
_SCREAMING_SNAKE_CASE =f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
MarianMTModel.from_pretrained(_a )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ={
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_SCREAMING_SNAKE_CASE =f"\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n ".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_SCREAMING_SNAKE_CASE =['''finetune.py'''] + bash_script.split() + args
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationModule.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE =main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
        # fails if generate hangs, e.g. because a bad config was saved (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            _SCREAMING_SNAKE_CASE ={os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class A__ ( UpperCamelCase__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =f"{self.test_file_dir_str}/test_data/wmt_en_ro"
_SCREAMING_SNAKE_CASE ={
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
_SCREAMING_SNAKE_CASE =(
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
_SCREAMING_SNAKE_CASE =bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
_SCREAMING_SNAKE_CASE =bash_script.replace(_a , str(_a ) )
_SCREAMING_SNAKE_CASE =self.get_auto_remove_tmp_dir()
_SCREAMING_SNAKE_CASE =bash_script.replace('''--fp16''' , '''''' )
_SCREAMING_SNAKE_CASE =6
_SCREAMING_SNAKE_CASE =(
['''distillation.py''']
+ bash_script.split()
+ [
f"--output_dir={output_dir}",
'''--gpus=1''',
'''--learning_rate=1e-3''',
f"--num_train_epochs={epochs}",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(_a , '''argv''' , _a ):
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
_SCREAMING_SNAKE_CASE =pl.Trainer.add_argparse_args(_a )
_SCREAMING_SNAKE_CASE =SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
_SCREAMING_SNAKE_CASE =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_SCREAMING_SNAKE_CASE =distill_main(_a )
# Check metrics
_SCREAMING_SNAKE_CASE =load_json(model.metrics_save_path )
_SCREAMING_SNAKE_CASE =metrics['''val'''][0]
_SCREAMING_SNAKE_CASE =metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # fails if generate hangs, e.g. because a bad config was saved
assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
_SCREAMING_SNAKE_CASE =os.listdir(_a )
_SCREAMING_SNAKE_CASE =[x for x in contents if x.endswith('''.ckpt''' )][0]
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , _a )
_SCREAMING_SNAKE_CASE =torch.load(_a , map_location='''cpu''' )
_SCREAMING_SNAKE_CASE ='''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            _SCREAMING_SNAKE_CASE ={os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
        assert len(metrics['''test'''] ) == 1
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict , _a : int , _a : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =jnp.ones((batch_size, length) ) / length
return scores
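    # A uniform row stays uniform under temperature scaling followed by softmax,
    # which is exactly what the first two assertions in the temperature test rely on.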
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =20
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(batch_size=2 , length=_a )
# tweak scores to not be uniform anymore
_SCREAMING_SNAKE_CASE =scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
_SCREAMING_SNAKE_CASE =scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
_SCREAMING_SNAKE_CASE =jax.nn.softmax(_a , axis=-1 )
_SCREAMING_SNAKE_CASE =FlaxTemperatureLogitsWarper(temperature=0.5 )
_SCREAMING_SNAKE_CASE =FlaxTemperatureLogitsWarper(temperature=1.3 )
_SCREAMING_SNAKE_CASE =jax.nn.softmax(temp_dist_warper_sharper(_a , scores.copy() , cur_len=_a ) , axis=-1 )
_SCREAMING_SNAKE_CASE =jax.nn.softmax(temp_dist_warper_smoother(_a , scores.copy() , cur_len=_a ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =10
_SCREAMING_SNAKE_CASE =2
# create ramp distribution
_SCREAMING_SNAKE_CASE =np.broadcast_to(np.arange(_a )[None, :] , (batch_size, vocab_size) ).copy()
_SCREAMING_SNAKE_CASE =ramp_logits[1:, : vocab_size // 2] + vocab_size
_SCREAMING_SNAKE_CASE =FlaxTopKLogitsWarper(3 )
_SCREAMING_SNAKE_CASE =top_k_warp(_a , _a , cur_len=_a )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
_SCREAMING_SNAKE_CASE =5
_SCREAMING_SNAKE_CASE =FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
_SCREAMING_SNAKE_CASE =np.broadcast_to(np.arange(_a )[None, :] , (batch_size, length) ).copy()
_SCREAMING_SNAKE_CASE =top_k_warp_safety_check(_a , _a , cur_len=_a )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =10
_SCREAMING_SNAKE_CASE =2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
_SCREAMING_SNAKE_CASE =np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
_SCREAMING_SNAKE_CASE =FlaxTopPLogitsWarper(0.8 )
_SCREAMING_SNAKE_CASE =np.exp(top_p_warp(_a , _a , cur_len=_a ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
_SCREAMING_SNAKE_CASE =np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_a , _a , atol=1E-3 ) )
# check edge cases with negative and extreme logits
_SCREAMING_SNAKE_CASE =np.broadcast_to(np.arange(_a )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
_SCREAMING_SNAKE_CASE =ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
_SCREAMING_SNAKE_CASE =FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
_SCREAMING_SNAKE_CASE =top_p_warp(_a , _a , cur_len=_a )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
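    # Worked example for the first row above: probabilities (0.3, 0.1, 0.1, 0.5)
    # sorted descending are (0.5, 0.3, 0.1, 0.1); the cumulative sum reaches
    # top_p=0.8 after two tokens, so only the 0.5 and 0.3 entries survive.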
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =20
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
# check that min length is applied at length 5
_SCREAMING_SNAKE_CASE =ids_tensor((batch_size, 20) , vocab_size=20 )
_SCREAMING_SNAKE_CASE =5
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =min_dist_processor(_a , _a , cur_len=_a )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =15
_SCREAMING_SNAKE_CASE =min_dist_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =20
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
# check that all scores are -inf except the bos_token_id score
_SCREAMING_SNAKE_CASE =ids_tensor((batch_size, 1) , vocab_size=20 )
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =logits_processor(_a , _a , cur_len=_a )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] )  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
_SCREAMING_SNAKE_CASE =3
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =logits_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =20
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =5
_SCREAMING_SNAKE_CASE =FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
# check that all scores are -inf except the eos_token_id when max_length is reached
_SCREAMING_SNAKE_CASE =ids_tensor((batch_size, 4) , vocab_size=20 )
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =logits_processor(_a , _a , cur_len=_a )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
_SCREAMING_SNAKE_CASE =3
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =logits_processor(_a , _a , cur_len=_a )
self.assertFalse(jnp.isinf(_a ).any() )
def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =10
_SCREAMING_SNAKE_CASE =15
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =15
# dummy input_ids and scores
_SCREAMING_SNAKE_CASE =ids_tensor((batch_size, sequence_length) , _a )
_SCREAMING_SNAKE_CASE =input_ids.copy()
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =scores.copy()
# instantiate all dist processors
_SCREAMING_SNAKE_CASE =FlaxTemperatureLogitsWarper(temperature=0.5 )
_SCREAMING_SNAKE_CASE =FlaxTopKLogitsWarper(3 )
_SCREAMING_SNAKE_CASE =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_SCREAMING_SNAKE_CASE =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
_SCREAMING_SNAKE_CASE =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
_SCREAMING_SNAKE_CASE =FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
_SCREAMING_SNAKE_CASE =10
# no processor list
_SCREAMING_SNAKE_CASE =temp_dist_warp(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =top_k_warp(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =top_p_warp(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =min_dist_proc(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =bos_dist_proc(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =eos_dist_proc(_a , _a , cur_len=_a )
# with processor list
_SCREAMING_SNAKE_CASE =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_SCREAMING_SNAKE_CASE =processor(_a , _a , cur_len=_a )
# scores should be equal
self.assertTrue(jnp.allclose(_a , _a , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =4
_SCREAMING_SNAKE_CASE =10
_SCREAMING_SNAKE_CASE =15
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =15
# dummy input_ids and scores
_SCREAMING_SNAKE_CASE =ids_tensor((batch_size, sequence_length) , _a )
_SCREAMING_SNAKE_CASE =input_ids.copy()
_SCREAMING_SNAKE_CASE =self._get_uniform_logits(_a , _a )
_SCREAMING_SNAKE_CASE =scores.copy()
# instantiate all dist processors
_SCREAMING_SNAKE_CASE =FlaxTemperatureLogitsWarper(temperature=0.5 )
_SCREAMING_SNAKE_CASE =FlaxTopKLogitsWarper(3 )
_SCREAMING_SNAKE_CASE =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
_SCREAMING_SNAKE_CASE =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_a )
_SCREAMING_SNAKE_CASE =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_a )
_SCREAMING_SNAKE_CASE =FlaxForcedEOSTokenLogitsProcessor(max_length=_a , eos_token_id=_a )
_SCREAMING_SNAKE_CASE =10
# no processor list
def run_no_processor_list(_a : Optional[int] , _a : List[str] , _a : str ):
_SCREAMING_SNAKE_CASE =temp_dist_warp(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =top_k_warp(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =top_p_warp(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =min_dist_proc(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =bos_dist_proc(_a , _a , cur_len=_a )
_SCREAMING_SNAKE_CASE =eos_dist_proc(_a , _a , cur_len=_a )
return scores
# with processor list
def run_processor_list(_a : str , _a : Tuple , _a : Optional[Any] ):
_SCREAMING_SNAKE_CASE =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
_SCREAMING_SNAKE_CASE =processor(_a , _a , cur_len=_a )
return scores
_SCREAMING_SNAKE_CASE =jax.jit(_a )
_SCREAMING_SNAKE_CASE =jax.jit(_a )
_SCREAMING_SNAKE_CASE =jitted_run_no_processor_list(_a , _a , _a )
_SCREAMING_SNAKE_CASE =jitted_run_processor_list(_a , _a , _a )
# scores should be equal
self.assertTrue(jnp.allclose(_a , _a , atol=1E-3 ) )
# input_ids should never be changed
        self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
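# The jitted/non-jitted comparison above is the standard check that a processor
# chain is traceable: jax.jit compiles the function on first call and must
# reproduce the eager results. A tiny standalone demonstration of that contract:
import jax
import jax.numpy as jnp

jitted_double = jax.jit(lambda x: x * 2.0)
assert jnp.allclose(jitted_double(jnp.ones(3)), jnp.ones(3) * 2.0)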
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'''a''': 2} )
self.assertDictEqual(MockClass(a=2 , b=_a ).to_kwargs() , {'''a''': 2, '''b''': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'''a''': 2, '''c''': 2.25} )
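    # to_kwargs (inherited from accelerate's KwargsHandler) returns only the
    # fields whose values differ from the dataclass defaults, which is why the
    # bare MockClass() call above yields an empty dict.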
@require_cuda
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_SCREAMING_SNAKE_CASE =Accelerator(mixed_precision='''fp16''' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_SCREAMING_SNAKE_CASE =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 10_24.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , _a )
@require_multi_gpu
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =['''torchrun''', f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
execute_subprocess_async(_a , env=os.environ.copy() )
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(1_00, 2_00)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (10_24 * 10_24)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
        raise ValueError(error_msg)
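# Added illustration (hedged sketch): `KwargsHandler.to_kwargs()` returns only the
# fields whose values differ from the dataclass defaults, which is exactly what
# the dictionary assertions in the test above exercise. `ClipKwargs` is a
# hypothetical example class, not part of accelerate.
from dataclasses import dataclass

from accelerate.utils import KwargsHandler


@dataclass
class ClipKwargs(KwargsHandler):
    max_norm: float = 1.0
    norm_type: int = 2


print(ClipKwargs().to_kwargs())  # {} - nothing differs from the defaults
print(ClipKwargs(max_norm=5.0).to_kwargs())  # {'max_norm': 5.0}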
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
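# Added illustration (hedged sketch): end-to-end use of the processor tested
# above. The checkpoint name "alibaba-damo/mgp-str-base" and the class
# `MgpstrForSceneTextRecognition` come from the upstream transformers docs and
# are assumptions here, not taken from the test file itself.
from PIL import Image
from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor

demo_processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
demo_model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
demo_image = Image.new("RGB", (128, 32))  # placeholder; use a real cropped text image
demo_pixels = demo_processor(images=demo_image, return_tensors="pt").pixel_values
demo_outputs = demo_model(demo_pixels)
# batch_decode fuses the char/bpe/wordpiece heads into final text predictions.
print(demo_processor.batch_decode(demo_outputs.logits)["generated_text"])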
class Graph:
    """Undirected weighted graph, plus Boruvka's minimum spanning tree algorithm."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct, as Boruvka's algorithm requires."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Computes the minimum spanning tree of `graph` with Boruvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            # For each component, track the cheapest edge leaving it this round.
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
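# Example usage for the classes above: build a small weighted graph and extract
# its minimum spanning tree with Boruvka's algorithm.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[0, 1, 2, 3],
        edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)],
    )
    g.distinct_weight()  # ensure the weight-distinctness precondition
    mst = Graph.boruvka_mst(g)
    print(mst)  # the MST keeps edges (0,1), (0,2) and (2,3)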
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_roformer_fast'''] = ['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roformer'''] = [
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_roformer'''] = [
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_roformer'''] = [
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
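# Added illustration (hedged sketch): what the `_LazyModule` indirection above
# buys you. Importing the package is cheap because the torch/tf/flax submodules
# are only materialized on first attribute access.
import transformers.models.roformer as roformer_pkg  # cheap: no framework import yet

roformer_config_cls = roformer_pkg.RoFormerConfig  # first access triggers the real import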
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
snake_case_ : str = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class A__ :
UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class A__ :
UpperCAmelCase = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} )
UpperCAmelCase = field(metadata={"help": "Should contain the data files for the task."} )
UpperCAmelCase = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        results.update(result)

    return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
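# Added usage note (hedged): an example invocation of this script. The flag
# names map to the dataclass fields above plus the standard TrainingArguments;
# "swag" is assumed to be one of the tasks registered in
# `utils_multiple_choice.processors`.
#
#   python run_multiple_choice.py \
#       --task_name swag \
#       --model_name_or_path bert-base-uncased \
#       --data_dir ./data/swag \
#       --max_seq_length 128 \
#       --output_dir ./swag_output \
#       --do_train \
#       --do_eval \
#       --overwrite_output_dir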
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def binary_exponentiation(a, n, mod):
    """Computes (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000  # dividend
b = 10  # divisor
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
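# Why the checks above hold: for prime p and b not divisible by p, Fermat's
# little theorem gives b**(p-1) % p == 1, so b**(p-2) % p is the modular
# inverse of b. Python's built-in three-argument pow computes the same value:
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1  # inverse property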
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(_a , '''depth_multiplier''' ) )
class A__ :
def __init__( self : Tuple , _a : Optional[int] , _a : Union[str, Any]=13 , _a : Dict=3 , _a : Dict=32 , _a : List[Any]=0.25 , _a : Any=8 , _a : str=True , _a : Union[str, Any]=1024 , _a : Dict=32 , _a : Union[str, Any]="relu6" , _a : Tuple=0.1 , _a : List[str]=0.02 , _a : List[str]=True , _a : List[Any]=True , _a : Dict=10 , _a : Dict=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =depth_multiplier
_SCREAMING_SNAKE_CASE =min_depth
_SCREAMING_SNAKE_CASE =tf_padding
_SCREAMING_SNAKE_CASE =int(last_hidden_size * depth_multiplier )
_SCREAMING_SNAKE_CASE =output_stride
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =classifier_dropout_prob
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =scope
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Union[str, Any] , _a : int , _a : str , _a : Dict , _a : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =MobileNetVaModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self : str , _a : Union[str, Any] , _a : List[str] , _a : Any , _a : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =MobileNetVaForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =MobileNetVaModelTester(self )
_SCREAMING_SNAKE_CASE =MobileNetVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(_a : Dict , _a : List[Any] , _a : Optional[int] ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.hidden_states
_SCREAMING_SNAKE_CASE =26
self.assertEqual(len(_a ) , _a )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
def __UpperCamelCase ( self : str ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =MobileNetVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(_a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
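# Added illustration (hedged sketch): the same checkpoint exercised through the
# high-level pipeline API instead of the raw model/processor pair used in the
# integration test above. "google/mobilenet_v1_1.0_224" is the checkpoint the
# test itself loads.
from transformers import pipeline

mobilenet_classifier = pipeline("image-classification", model="google/mobilenet_v1_1.0_224")
print(mobilenet_classifier(prepare_img())[0])  # e.g. {'label': ..., 'score': ...}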
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
def __init__( self : Optional[Any] , _a : int , _a : Optional[Any]=3 , _a : Tuple=32 , _a : Any=3 , _a : Union[str, Any]=10 , _a : Optional[int]=[8, 16, 32, 64] , _a : Union[str, Any]=[1, 1, 2, 1] , _a : Optional[Any]=True , _a : int=True , _a : Tuple="relu" , _a : Optional[Any]=3 , _a : str=None , _a : List[Any]=["stage2", "stage3", "stage4"] , _a : Union[str, Any]=[2, 3, 4] , _a : Dict=1 , ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =embeddings_size
_SCREAMING_SNAKE_CASE =hidden_sizes
_SCREAMING_SNAKE_CASE =depths
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =len(_a )
_SCREAMING_SNAKE_CASE =out_features
_SCREAMING_SNAKE_CASE =out_indices
_SCREAMING_SNAKE_CASE =num_groups
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def __UpperCamelCase ( self : Optional[Any] , _a : Dict , _a : str , _a : Dict ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Union[str, Any] , _a : Optional[Any] , _a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =BitForImageClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : List[str] , _a : Any , _a : str , _a : List[str] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =BitBackbone(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
_SCREAMING_SNAKE_CASE ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCAmelCase = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return
@unittest.skip(reason='''Bit does not output attentions''' )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_a )
_SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE =['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCamelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE =model_class(config=_a )
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(_a : Any , _a : Optional[int] , _a : Tuple ):
_SCREAMING_SNAKE_CASE =model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
_SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE =self.model_tester.num_stages
self.assertEqual(len(_a ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE =['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE =layer_type
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE =True
check_hidden_states_output(_a , _a , _a )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __UpperCamelCase ( self : int ) -> Tuple:
"""simple docstring"""
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =BitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_a )
_SCREAMING_SNAKE_CASE =self.default_image_processor
_SCREAMING_SNAKE_CASE =prepare_img()
_SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**_a )
# verify the logits
_SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@require_torch
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = (BitBackbone,) if is_torch_available() else ()
UpperCAmelCase = BitConfig
UpperCAmelCase = False
def __UpperCamelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        _SCREAMING_SNAKE_CASE =BitModelTester(self )
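# Added illustration (hedged sketch): using BitBackbone outside the test
# harness. "google/bit-50" is the first entry of BIT_PRETRAINED_MODEL_ARCHIVE_LIST
# referenced above; passing out_features at load time is assumed to override the
# config as with other transformers backbones.
bit_backbone = BitBackbone.from_pretrained("google/bit-50", out_features=["stage2", "stage3"])
demo_pixels = torch.randn(1, 3, 224, 224)
bit_features = bit_backbone(demo_pixels).feature_maps  # one tensor per requested stage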
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
snake_case_ : str = logging.get_logger('''transformers.models.speecht5''')
def load_weights(checkpoint, hf_model, config):
    """Copies the weight-normalized generator weights into the HF model in place."""
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # The stats file stores the mean/scale used to de-normalize log-mel inputs.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
snake_case_ : int = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
snake_case_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
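# Added usage note (hedged): an example invocation of this conversion script;
# all paths below are placeholders, and the flag names come from the argparse
# definitions above.
#
#   python convert_hifigan.py \
#       --checkpoint_path ./hifigan/generator.ckpt \
#       --stats_path ./hifigan/stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub your-username/speecht5_hifigan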
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
snake_case_ : Optional[Any] = R'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(UpperCamelCase__ )
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = "rag"
UpperCAmelCase = True
def __init__( self : Tuple , _a : List[Any]=None , _a : Tuple=True , _a : Optional[Any]=None , _a : int=None , _a : List[str]=None , _a : int=None , _a : Optional[int]=None , _a : str=" / " , _a : Any=" // " , _a : Optional[Any]=5 , _a : int=300 , _a : Optional[Any]=768 , _a : Any=8 , _a : List[str]="wiki_dpr" , _a : Dict="train" , _a : Union[str, Any]="compressed" , _a : str=None , _a : Union[str, Any]=None , _a : int=False , _a : Any=False , _a : Any=0.0 , _a : Any=True , _a : List[str]=False , _a : Optional[int]=False , _a : int=False , _a : Union[str, Any]=True , _a : Optional[int]=None , **_a : List[str] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
bos_token_id=_a , pad_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , forced_eos_token_id=_a , is_encoder_decoder=_a , prefix=_a , vocab_size=_a , **_a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_SCREAMING_SNAKE_CASE =kwargs.pop('''question_encoder''' )
_SCREAMING_SNAKE_CASE =question_encoder_config.pop('''model_type''' )
_SCREAMING_SNAKE_CASE =kwargs.pop('''generator''' )
_SCREAMING_SNAKE_CASE =decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =AutoConfig.for_model(_a , **_a )
_SCREAMING_SNAKE_CASE =reduce_loss
_SCREAMING_SNAKE_CASE =label_smoothing
_SCREAMING_SNAKE_CASE =exclude_bos_score
_SCREAMING_SNAKE_CASE =do_marginalize
_SCREAMING_SNAKE_CASE =title_sep
_SCREAMING_SNAKE_CASE =doc_sep
_SCREAMING_SNAKE_CASE =n_docs
_SCREAMING_SNAKE_CASE =max_combined_length
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =dataset_split
_SCREAMING_SNAKE_CASE =index_name
_SCREAMING_SNAKE_CASE =retrieval_vector_size
_SCREAMING_SNAKE_CASE =retrieval_batch_size
_SCREAMING_SNAKE_CASE =passages_path
_SCREAMING_SNAKE_CASE =index_path
_SCREAMING_SNAKE_CASE =use_dummy_dataset
_SCREAMING_SNAKE_CASE =output_retrieved
_SCREAMING_SNAKE_CASE =do_deduplication
_SCREAMING_SNAKE_CASE =use_cache
if self.forced_eos_token_id is None:
_SCREAMING_SNAKE_CASE =getattr(self.generator , '''forced_eos_token_id''' , _a )
@classmethod
def __UpperCamelCase ( cls : Optional[int] , _a : PretrainedConfig , _a : PretrainedConfig , **_a : Dict ) -> PretrainedConfig:
"""simple docstring"""
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_a )
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =copy.deepcopy(self.__dict__ )
_SCREAMING_SNAKE_CASE =self.question_encoder.to_dict()
_SCREAMING_SNAKE_CASE =self.generator.to_dict()
_SCREAMING_SNAKE_CASE =self.__class__.model_type
        return output
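# Added illustration (hedged sketch): composing the config above from two
# sub-configs, matching the `question_encoder`/`generator` kwargs asserted in
# __init__. Upstream this class is `transformers.RagConfig`; "dpr" and "bart"
# are ordinary model types for the two components.
from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.for_model("dpr")
generator_config = AutoConfig.for_model("bart")
rag_config = RagConfig(
    question_encoder=question_encoder_config.to_dict(),
    generator=generator_config.to_dict(),
    n_docs=5,
)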
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
snake_case_ : Optional[Any] = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
snake_case_ : Union[str, Any] = {
'''RUCAIBox/mvp''': 10_24,
}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = MvpTokenizer
def __init__( self : Union[str, Any] , _a : Optional[Any]=None , _a : int=None , _a : List[Any]=None , _a : Dict="replace" , _a : Tuple="<s>" , _a : int="</s>" , _a : Dict="</s>" , _a : Optional[int]="<s>" , _a : int="<unk>" , _a : int="<pad>" , _a : str="<mask>" , _a : List[str]=False , _a : List[Any]=True , **_a : Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : str , _a : Optional[int] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Union[str, Any] , *_a : str , **_a : List[Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Any , *_a : Optional[int] , **_a : List[str] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Optional[Any] , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : str , _a : List[str] , _a : List[str]=None ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : str , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
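# --- usage sketch ------------------------------------------------------------
# Minimal round trip through the fast tokenizer above (the upstream name
# MvpTokenizerFast is an assumption; fetching the vocab needs network access):
# from transformers import MvpTokenizerFast
# tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
# ids = tok("Summarize: MVP unifies many text generation tasks.").input_ids
# print(tok.decode(ids, skip_special_tokens=True))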
from manim import *
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Dict ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
rect.set_stroke(_a )
_SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 )
self.add(_a )
model_cpu_arr.append(_a )
self.add(*_a , *_a , *_a )
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
checkpoint.move_to([3, 0.5, 0] )
self.add(_a )
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 )
target.move_to(_a )
ckpt_arr.append(_a )
_SCREAMING_SNAKE_CASE =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_a )
self.add(*_a , *_a )
_SCREAMING_SNAKE_CASE =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_a , _a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_a )
_SCREAMING_SNAKE_CASE =MarkupText(
f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )]
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 )
_SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 )
_SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) )
_SCREAMING_SNAKE_CASE =[]
for i, rect in enumerate(_a ):
_SCREAMING_SNAKE_CASE =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_a , run_time=1.5 ) )
self.play(*_a )
self.play(FadeOut(_a ) )
_SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a , run_time=3 ) )
self.play(
FadeOut(_a , _a , *_a , *_a ) , )
        self.wait()
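# The scene above is rendered from the command line with Manim Community, e.g.
#   manim -pql <this_file>.py <SceneClassName>
# where -p opens a preview and -ql selects low quality for fast iteration.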
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class A__ ( UpperCamelCase__ ):
def __init__( self : Union[str, Any] , _a : Optional[Any] , _a : List[Any] , _a : Any ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =process
_SCREAMING_SNAKE_CASE =params
def __len__( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : List[Any] , _a : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.dataset[i]
_SCREAMING_SNAKE_CASE =self.process(_a , **self.params )
return processed
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[Any] , _a : int , _a : Any , _a : List[str] , _a : List[Any]=None ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =loader
_SCREAMING_SNAKE_CASE =infer
_SCREAMING_SNAKE_CASE =params
if loader_batch_size == 1:
            # Let's spare some time by deactivating loader-batch unrolling altogether
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =loader_batch_size
# Internal bookkeeping
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
def __len__( self : List[str] ) -> int:
"""simple docstring"""
return len(self.loader )
def __iter__( self : Tuple ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =iter(self.loader )
return self
def __UpperCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_SCREAMING_SNAKE_CASE =self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_SCREAMING_SNAKE_CASE ={}
for k, element in self._loader_batch_data.items():
if isinstance(_a , _a ):
# Convert ModelOutput to tuple first
_SCREAMING_SNAKE_CASE =element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_SCREAMING_SNAKE_CASE =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_SCREAMING_SNAKE_CASE =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_a , _a ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_SCREAMING_SNAKE_CASE =tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_SCREAMING_SNAKE_CASE =tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_SCREAMING_SNAKE_CASE =None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_SCREAMING_SNAKE_CASE =element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_SCREAMING_SNAKE_CASE =np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_SCREAMING_SNAKE_CASE =element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_SCREAMING_SNAKE_CASE =self._loader_batch_data.__class__(_a )
self._loader_batch_index += 1
return result
def __UpperCamelCase ( self : Dict ) -> str:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_SCREAMING_SNAKE_CASE =next(self.iterator )
_SCREAMING_SNAKE_CASE =self.infer(_a , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_a , torch.Tensor ):
_SCREAMING_SNAKE_CASE =processed
else:
_SCREAMING_SNAKE_CASE =list(processed.keys() )[0]
_SCREAMING_SNAKE_CASE =processed[key]
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =len(_a )
else:
_SCREAMING_SNAKE_CASE =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_SCREAMING_SNAKE_CASE =observed_batch_size
# Setting internal index to unwrap the batch
_SCREAMING_SNAKE_CASE =processed
_SCREAMING_SNAKE_CASE =0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class A__ ( UpperCamelCase__ ):
def __init__( self : Any , _a : int , _a : List[str] , _a : Optional[Any] , _a : Any=None ) -> Optional[Any]:
"""simple docstring"""
super().__init__(_a , _a , _a )
def __iter__( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =iter(self.loader )
_SCREAMING_SNAKE_CASE =None
return self
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
if self.subiterator is None:
_SCREAMING_SNAKE_CASE =self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_SCREAMING_SNAKE_CASE =next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item;
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and been fully iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_SCREAMING_SNAKE_CASE =self.infer(next(self.iterator ) , **self.params )
_SCREAMING_SNAKE_CASE =next(self.subiterator )
return processed
class A__ ( UpperCamelCase__ ):
def __iter__( self : int ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =iter(self.loader )
return self
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =[]
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_SCREAMING_SNAKE_CASE =self.loader_batch_item()
_SCREAMING_SNAKE_CASE =item.pop('''is_last''' )
accumulator.append(_a )
if is_last:
return accumulator
while not is_last:
_SCREAMING_SNAKE_CASE =self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_a , torch.Tensor ):
_SCREAMING_SNAKE_CASE =processed
else:
_SCREAMING_SNAKE_CASE =list(processed.keys() )[0]
_SCREAMING_SNAKE_CASE =processed[key]
if isinstance(_a , _a ):
_SCREAMING_SNAKE_CASE =len(_a )
else:
_SCREAMING_SNAKE_CASE =first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_SCREAMING_SNAKE_CASE =observed_batch_size
_SCREAMING_SNAKE_CASE =processed
_SCREAMING_SNAKE_CASE =0
while self._loader_batch_index < self.loader_batch_size:
_SCREAMING_SNAKE_CASE =self.loader_batch_item()
_SCREAMING_SNAKE_CASE =item.pop('''is_last''' )
accumulator.append(_a )
if is_last:
return accumulator
else:
_SCREAMING_SNAKE_CASE =processed
_SCREAMING_SNAKE_CASE =item.pop('''is_last''' )
accumulator.append(_a )
return accumulator
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Dataset , _a : str ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =key
def __len__( self : Union[str, Any] ) -> int:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Union[str, Any] , _a : Any ) -> Any:
"""simple docstring"""
return self.dataset[i][self.key]
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : Dataset , _a : str , _a : str ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =dataset
_SCREAMING_SNAKE_CASE =keya
_SCREAMING_SNAKE_CASE =keya
def __len__( self : Any ) -> Tuple:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self : Union[str, Any] , _a : List[Any] ) -> List[str]:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]} | 691 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
snake_case_ : Any = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
snake_case_ : List[str] = {'''facebook/blenderbot-3B''': 1_28}
class A__ ( UpperCamelCase__ ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = BlenderbotTokenizer
def __init__( self : Dict , _a : str=None , _a : Optional[int]=None , _a : List[str]=None , _a : int="replace" , _a : Dict="<s>" , _a : Optional[Any]="</s>" , _a : Any="</s>" , _a : int="<s>" , _a : int="<unk>" , _a : Optional[int]="<pad>" , _a : Tuple="<mask>" , _a : Tuple=False , _a : Union[str, Any]=True , **_a : List[str] , ) -> Optional[int]:
"""simple docstring"""
super().__init__(
_a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , )
_SCREAMING_SNAKE_CASE =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =getattr(_a , pre_tok_state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =pre_tok_class(**_a )
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE ='''post_processor'''
_SCREAMING_SNAKE_CASE =getattr(self.backend_tokenizer , _a , _a )
if tokenizer_component_instance:
_SCREAMING_SNAKE_CASE =json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the `post_processor_class` object
if "sep" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''sep'''] )
if "cls" in state:
_SCREAMING_SNAKE_CASE =tuple(state['''cls'''] )
_SCREAMING_SNAKE_CASE =False
if state.get('''add_prefix_space''' , _a ) != add_prefix_space:
_SCREAMING_SNAKE_CASE =add_prefix_space
_SCREAMING_SNAKE_CASE =True
if state.get('''trim_offsets''' , _a ) != trim_offsets:
_SCREAMING_SNAKE_CASE =trim_offsets
_SCREAMING_SNAKE_CASE =True
if changes_to_apply:
_SCREAMING_SNAKE_CASE =getattr(_a , state.pop('''type''' ) )
_SCREAMING_SNAKE_CASE =component_class(**_a )
setattr(self.backend_tokenizer , _a , _a )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCamelCase ( self : Optional[Any] , _a : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value
_SCREAMING_SNAKE_CASE =value
def __UpperCamelCase ( self : Optional[Any] , *_a : str , **_a : int ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_a , **_a )
def __UpperCamelCase ( self : List[Any] , *_a : Optional[int] , **_a : Union[str, Any] ) -> BatchEncoding:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =kwargs.get('''is_split_into_words''' , _a )
assert self.add_prefix_space or not is_split_into_words, (
f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_a , **_a )
def __UpperCamelCase ( self : Dict , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[self.sep_token_id]
_SCREAMING_SNAKE_CASE =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCamelCase ( self : Tuple , _a : List[int] , _a : Optional[List[int]] = None ) -> Optional[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def __UpperCamelCase ( self : Any , _a : "Conversation" ) -> List[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[]
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(_a )
_SCREAMING_SNAKE_CASE =''' '''.join(_a )
_SCREAMING_SNAKE_CASE =self.encode(_a )
if len(_a ) > self.model_max_length:
_SCREAMING_SNAKE_CASE =input_ids[-self.model_max_length :]
logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
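# --- usage sketch ------------------------------------------------------------
# Encoding a user turn with the tokenizer above (upstream name
# BlenderbotTokenizerFast assumed); note the leading space Blenderbot expects
# for user inputs, and the single trailing </s> added by
# build_inputs_with_special_tokens:
# from transformers import BlenderbotTokenizerFast
# tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
# print(tok(" Hello, how are you?").input_ids)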
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : List[Any] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
for attribute in key.split('''.'''):
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__)
if weight_type is not None:
_SCREAMING_SNAKE_CASE =getattr(a__ ,a__).shape
else:
_SCREAMING_SNAKE_CASE =hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_g":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "weight_v":
_SCREAMING_SNAKE_CASE =value
elif weight_type == "bias":
_SCREAMING_SNAKE_CASE =value
else:
_SCREAMING_SNAKE_CASE =value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def lowerCamelCase( a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =fairseq_model.state_dict()
_SCREAMING_SNAKE_CASE =hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_SCREAMING_SNAKE_CASE =False
if "conv_layers" in name:
load_conv_layer(
a__ ,a__ ,a__ ,a__ ,hf_model.config.feat_extract_norm == '''group''' ,)
_SCREAMING_SNAKE_CASE =True
else:
for key, mapped_key in MAPPING.items():
_SCREAMING_SNAKE_CASE ='''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''')[-1] == name.split('''.''')[0] and not is_finetuned):
_SCREAMING_SNAKE_CASE =True
if "*" in mapped_key:
_SCREAMING_SNAKE_CASE =name.split(a__)[0].split('''.''')[-2]
_SCREAMING_SNAKE_CASE =mapped_key.replace('''*''' ,a__)
if "weight_g" in name:
_SCREAMING_SNAKE_CASE ='''weight_g'''
elif "weight_v" in name:
_SCREAMING_SNAKE_CASE ='''weight_v'''
elif "weight" in name:
_SCREAMING_SNAKE_CASE ='''weight'''
elif "bias" in name:
_SCREAMING_SNAKE_CASE ='''bias'''
else:
_SCREAMING_SNAKE_CASE =None
set_recursively(a__ ,a__ ,a__ ,a__ ,a__)
continue
if not is_used:
unused_weights.append(a__)
logger.warning(f"Unused weights: {unused_weights}")
def lowerCamelCase( a__ ,a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE =full_name.split('''conv_layers.''')[-1]
_SCREAMING_SNAKE_CASE =name.split('''.''')
_SCREAMING_SNAKE_CASE =int(items[0])
_SCREAMING_SNAKE_CASE =int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
_SCREAMING_SNAKE_CASE =value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
else:
unused_weights.append(a__)
@torch.no_grad()
def lowerCamelCase( a__ ,a__ ,a__=None ,a__=None ,a__=True):
if config_path is not None:
_SCREAMING_SNAKE_CASE =HubertConfig.from_pretrained(a__)
else:
_SCREAMING_SNAKE_CASE =HubertConfig()
if is_finetuned:
if dict_path:
_SCREAMING_SNAKE_CASE =Dictionary.load(a__)
            # important: change the bos & pad token ids, since the CTC blank symbol
            # is <pad> and not <s> as in fairseq
_SCREAMING_SNAKE_CASE =target_dict.pad_index
_SCREAMING_SNAKE_CASE =target_dict.bos_index
_SCREAMING_SNAKE_CASE =target_dict.eos_index
_SCREAMING_SNAKE_CASE =len(target_dict.symbols)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''vocab.json''')
if not os.path.isdir(a__):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(a__))
return
os.makedirs(a__ ,exist_ok=a__)
with open(a__ ,'''w''' ,encoding='''utf-8''') as vocab_handle:
json.dump(target_dict.indices ,a__)
_SCREAMING_SNAKE_CASE =WavaVecaCTCTokenizer(
a__ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=a__ ,)
_SCREAMING_SNAKE_CASE =True if config.feat_extract_norm == '''layer''' else False
_SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=1_6000 ,padding_value=0 ,do_normalize=a__ ,return_attention_mask=a__ ,)
_SCREAMING_SNAKE_CASE =WavaVecaProcessor(feature_extractor=a__ ,tokenizer=a__)
processor.save_pretrained(a__)
_SCREAMING_SNAKE_CASE =HubertForCTC(a__)
else:
_SCREAMING_SNAKE_CASE =HubertModel(a__)
if is_finetuned:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])})
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
_SCREAMING_SNAKE_CASE =model[0].eval()
recursively_load_weights(a__ ,a__ ,a__)
hf_wavavec.save_pretrained(a__)
if __name__ == "__main__":
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
snake_case_ : Tuple = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
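# Example invocation (paths are placeholders and the script name is assumed):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned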
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
_SCREAMING_SNAKE_CASE =[
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_SCREAMING_SNAKE_CASE ={
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict:
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : int , **_a : Optional[Any] ) -> Any:
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_rust_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a )
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.batch_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
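# --- usage sketch ------------------------------------------------------------
# Outside the test, the processor pairs a BERT tokenizer with a CLIP-style
# image processor; typical inference usage (hub id is a published checkpoint):
# from transformers import ChineseCLIPProcessor
# proc = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# batch = proc(text=["一张猫的照片"], images=pil_image, return_tensors="pt")  # pil_image: a PIL.Image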
from __future__ import annotations
from cmath import sqrt
def lowerCamelCase( a__ ,a__ ,a__):
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''')
_SCREAMING_SNAKE_CASE =b * b - 4 * a * c
_SCREAMING_SNAKE_CASE =(-b + sqrt(a__)) / (2 * a)
_SCREAMING_SNAKE_CASE =(-b - sqrt(a__)) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def lowerCamelCase( ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =quadratic_roots(a=5 ,b=6 ,c=1)
print(f"The solutions are: {solutiona} and {solutiona}")
if __name__ == "__main__":
    main()
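# Extra check: a negative discriminant yields complex roots. Using the original
# call-site name from main() above:
# quadratic_roots(a=1, b=0, c=1)  # x**2 + 1 = 0 -> (1j, -1j)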
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCamelCase( a__ ,a__ ,a__ ,a__):
_SCREAMING_SNAKE_CASE ={
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
_SCREAMING_SNAKE_CASE ={
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
_SCREAMING_SNAKE_CASE =f"{src_lang}-{tgt_lang}"
_SCREAMING_SNAKE_CASE =f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=a__ ,exist_ok=a__)
_SCREAMING_SNAKE_CASE =os.path.join(a__ ,'''README.md''')
print(f"Generating {path}")
with open(a__ ,'''w''' ,encoding='''utf-8''') as f:
f.write(a__)
# make sure we are under the root of the project
snake_case_ : Any = Path(__file__).resolve().parent.parent.parent
snake_case_ : Tuple = repo_dir / '''model_cards'''
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
snake_case_ : Union[str, Any] = model_cards_dir / '''allenai''' / model_name
    write_model_card(model_card_dir, src_lang='''en''', tgt_lang='''de''', model_name=model_name)
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
snake_case_ : Optional[Any] = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A__ ( unittest.TestCase ):
@classmethod
def __UpperCamelCase ( cls : Dict ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCamelCase ( cls : List[Any] ) -> Any:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __UpperCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a , repo_id='''test-config''' , push_to_hub=_a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained(f"{USER}/test-config" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a , repo_id='''valid_org/test-config-org''' , push_to_hub=_a , use_auth_token=self._token )
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a , getattr(_a , _a ) )
def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
CustomConfig.register_for_auto_class()
_SCREAMING_SNAKE_CASE =CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=_a )
        # Can't make an isinstance check because the new_config is from the CustomConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class A__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_SCREAMING_SNAKE_CASE =c.n_embd + 1 # int
_SCREAMING_SNAKE_CASE =c.resid_pdrop + 1.0 # float
_SCREAMING_SNAKE_CASE =not c.scale_attn_weights # bool
_SCREAMING_SNAKE_CASE =c.summary_type + '''foo''' # str
c.update_from_string(
f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
self.assertEqual(_a , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(_a , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(_a , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(_a , c.summary_type , '''mismatch for key: summary_type''' )
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =PretrainedConfig()
_SCREAMING_SNAKE_CASE =[key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
_a , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
_SCREAMING_SNAKE_CASE =[key for key, value in config_common_kwargs.items() if value == getattr(_a , _a )]
if len(_a ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f" {', '.join(_a )}." )
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaises(_a ):
# config is in subfolder, the following should not work without specifying the subfolder
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(_a )
def __UpperCamelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =mock.Mock()
_SCREAMING_SNAKE_CASE =500
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =HTTPError
_SCREAMING_SNAKE_CASE ={}
# Download this model to make sure it's in the cache.
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_a ) as mock_head:
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __UpperCamelCase ( self : int ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained('''bert-base-cased''' )
_SCREAMING_SNAKE_CASE =['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_a )
_SCREAMING_SNAKE_CASE =2
json.dump(configuration.to_dict() , open(os.path.join(_a , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_SCREAMING_SNAKE_CASE =['''config.42.0.0.json''']
_SCREAMING_SNAKE_CASE =768
configuration.save_pretrained(_a )
shutil.move(os.path.join(_a , '''config.4.0.0.json''' ) , os.path.join(_a , '''config.42.0.0.json''' ) )
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(_a )
self.assertEqual(new_configuration.hidden_size , 768 )
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
_SCREAMING_SNAKE_CASE ='''v4.0.0'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =new_transformers.models.auto.AutoConfig.from_pretrained(
_a , return_unused_kwargs=_a )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(_a , {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
_SCREAMING_SNAKE_CASE ='''v3.0.0'''
_SCREAMING_SNAKE_CASE =old_transformers.models.auto.AutoConfig.from_pretrained(_a )
        self.assertEqual(old_configuration.hidden_size , 768 )
from typing import TYPE_CHECKING
from ....utils import _LazyModule
snake_case_ : Dict = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
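# --- how the lazy pattern behaves ---------------------------------------------
# _LazyModule replaces this package in sys.modules, deferring the heavy import
# until first attribute access (module path below is an assumption):
# import transformers.models.deprecated.tapex as tapex  # cheap, nothing loaded yet
# tapex.TapexTokenizer  # first access triggers the real tokenization_tapex import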
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =len(a__)
for i in range(length - 1):
_SCREAMING_SNAKE_CASE =i
for k in range(i + 1 ,a__):
if collection[k] < collection[least]:
_SCREAMING_SNAKE_CASE =k
if least != i:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =(collection[i], collection[least])
return collection
if __name__ == "__main__":
snake_case_ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
snake_case_ : Union[str, Any] = [int(item) for item in user_input.split(''',''')]
    print(selection_sort(unsorted))
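# Complexity note: the sort above always performs O(n^2) comparisons but at
# most n - 1 swaps, which makes selection sort attractive when writes are
# expensive. Using the call-site name from the demo above:
# selection_sort([64, 25, 12, 22, 11])  # -> [11, 12, 22, 25, 64]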
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase( a__):
def wrapper(*a__ ,**a__):
_SCREAMING_SNAKE_CASE =timeit.default_timer()
_SCREAMING_SNAKE_CASE =func(*a__ ,**a__)
_SCREAMING_SNAKE_CASE =timeit.default_timer() - starttime
return delta
_SCREAMING_SNAKE_CASE =func.__name__
return wrapper
def lowerCamelCase( a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =seq_shapes or {}
for i in range(a__):
_SCREAMING_SNAKE_CASE ={}
for col_id, (k, v) in enumerate(features.items()):
if isinstance(a__ ,_ArrayXD):
_SCREAMING_SNAKE_CASE =np.random.rand(*v.shape).astype(v.dtype)
elif isinstance(a__ ,datasets.Value):
if v.dtype == "string":
_SCREAMING_SNAKE_CASE ='''The small grey turtle was surprisingly fast when challenged.'''
else:
_SCREAMING_SNAKE_CASE =np.random.randint(10 ,size=1).astype(v.dtype).item()
elif isinstance(a__ ,datasets.Sequence):
while isinstance(a__ ,datasets.Sequence):
_SCREAMING_SNAKE_CASE =v.feature
_SCREAMING_SNAKE_CASE =seq_shapes[k]
_SCREAMING_SNAKE_CASE =np.random.rand(*a__).astype(v.dtype)
_SCREAMING_SNAKE_CASE =data
dummy_data.append((i, example))
return dummy_data
def lowerCamelCase( a__ ,a__ ,a__=100 ,a__=None):
_SCREAMING_SNAKE_CASE =generate_examples(a__ ,num_examples=a__ ,seq_shapes=a__)
with ArrowWriter(features=a__ ,path=a__) as writer:
for key, record in dummy_data:
_SCREAMING_SNAKE_CASE =features.encode_example(a__)
writer.write(a__)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.")
_SCREAMING_SNAKE_CASE =datasets.Dataset.from_file(filename=a__ ,info=datasets.DatasetInfo(features=a__))
    return dataset
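# --- usage sketch ------------------------------------------------------------
# In the upstream datasets benchmarks the writer helper above is called
# generate_example_dataset (name assumed; defs in this sample are renamed).
# A minimal feature spec would look like:
# features = datasets.Features({"text": datasets.Value("string"),
#                               "score": datasets.Value("float32")})
# ds = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)
# assert len(ds) == 100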
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =[]
for rt in rc.restypes:
_SCREAMING_SNAKE_CASE =rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
_SCREAMING_SNAKE_CASE ={name: i for i, name in enumerate(a__)}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14)
restype_atomaa_to_atomaa_list.append([0] * 37)
restype_atomaa_mask_list.append([0.0] * 14)
_SCREAMING_SNAKE_CASE =torch.tensor(
a__ ,dtype=torch.intaa ,device=protein['''aatype'''].device ,)
_SCREAMING_SNAKE_CASE =torch.tensor(
a__ ,dtype=torch.intaa ,device=protein['''aatype'''].device ,)
_SCREAMING_SNAKE_CASE =torch.tensor(
a__ ,dtype=torch.floataa ,device=protein['''aatype'''].device ,)
_SCREAMING_SNAKE_CASE =protein['''aatype'''].to(torch.long)
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_SCREAMING_SNAKE_CASE =restype_atomaa_to_atomaa[protein_aatype]
_SCREAMING_SNAKE_CASE =restype_atomaa_mask[protein_aatype]
_SCREAMING_SNAKE_CASE =residx_atomaa_mask
_SCREAMING_SNAKE_CASE =residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_SCREAMING_SNAKE_CASE =restype_atomaa_to_atomaa[protein_aatype]
_SCREAMING_SNAKE_CASE =residx_atomaa_to_atomaa.long()
# create the corresponding mask
_SCREAMING_SNAKE_CASE =torch.zeros([21, 37] ,dtype=torch.floataa ,device=protein['''aatype'''].device)
for restype, restype_letter in enumerate(rc.restypes):
_SCREAMING_SNAKE_CASE =rc.restype_atoa[restype_letter]
_SCREAMING_SNAKE_CASE =rc.residue_atoms[restype_name]
for atom_name in atom_names:
_SCREAMING_SNAKE_CASE =rc.atom_order[atom_name]
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =restype_atomaa_mask[protein_aatype]
_SCREAMING_SNAKE_CASE =residx_atomaa_mask
return protein
def lowerCamelCase( a__):
_SCREAMING_SNAKE_CASE =tree_map(lambda a__: torch.tensor(a__ ,device=batch['''aatype'''].device) ,a__ ,np.ndarray)
_SCREAMING_SNAKE_CASE =tensor_tree_map(lambda a__: np.array(a__) ,make_atomaa_masks(a__))
    return out
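# --- usage sketch ------------------------------------------------------------
# The helpers above build atom14<->atom37 index and mask tensors from residue
# types (upstream openfold names make_atom14_masks / make_atom14_masks_np are
# assumed, as are the output keys):
# protein = {"aatype": torch.zeros(8, dtype=torch.long)}  # toy input: 8 alanines
# protein = make_atom14_masks(protein)
# protein["atom14_atom_exists"].shape  # -> (8, 14)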
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
snake_case_ : Optional[Any] = logging.getLogger(__name__)
class A__ ( UpperCamelCase__ ):
def __UpperCamelCase ( self : Optional[int] , _a : Union[str, Any] , _a : List[str] , _a : List[Any]=None , _a : Optional[Any]=None ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.layer[current_layer](_a , _a , head_mask[current_layer] )
_SCREAMING_SNAKE_CASE =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : List[str] , _a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =BertEncoderWithPabee(_a )
self.init_weights()
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : List[str] , _a : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =threshold
def __UpperCamelCase ( self : Dict , _a : int ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =patience
def __UpperCamelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =0
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.inference_layers_num / self.inference_instances_num
_SCREAMING_SNAKE_CASE =(
f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(_a )
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[Any] , _a : Optional[Any]=None , _a : Optional[int]=None , _a : Any=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : Union[str, Any]=None , _a : str=None , _a : Any=None , _a : str=None , _a : Optional[Any]=None , _a : Dict=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
_SCREAMING_SNAKE_CASE =input_ids.size()
elif inputs_embeds is not None:
_SCREAMING_SNAKE_CASE =inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
_SCREAMING_SNAKE_CASE =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
if token_type_ids is None:
_SCREAMING_SNAKE_CASE =torch.zeros(_a , dtype=torch.long , device=_a )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_SCREAMING_SNAKE_CASE =self.get_extended_attention_mask(_a , _a , _a )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =encoder_hidden_states.size()
_SCREAMING_SNAKE_CASE =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_SCREAMING_SNAKE_CASE =torch.ones(_a , device=_a )
_SCREAMING_SNAKE_CASE =self.invert_attention_mask(_a )
else:
_SCREAMING_SNAKE_CASE =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_SCREAMING_SNAKE_CASE =self.get_head_mask(_a , self.config.num_hidden_layers )
_SCREAMING_SNAKE_CASE =self.embeddings(
input_ids=_a , position_ids=_a , token_type_ids=_a , inputs_embeds=_a )
_SCREAMING_SNAKE_CASE =embedding_output
if self.training:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.config.num_hidden_layers ):
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](output_dropout(_a ) )
res.append(_a )
elif self.patience == 0: # Use all layers for inference
_SCREAMING_SNAKE_CASE =self.encoder(
_a , attention_mask=_a , head_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
_SCREAMING_SNAKE_CASE =self.pooler(encoder_outputs[0] )
_SCREAMING_SNAKE_CASE =[output_layers[self.config.num_hidden_layers - 1](_a )]
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
_SCREAMING_SNAKE_CASE =self.encoder.adaptive_forward(
_a , current_layer=_a , attention_mask=_a , head_mask=_a )
_SCREAMING_SNAKE_CASE =self.pooler(_a )
_SCREAMING_SNAKE_CASE =output_layers[i](_a )
if regression:
_SCREAMING_SNAKE_CASE =logits.detach()
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
else:
_SCREAMING_SNAKE_CASE =logits.detach().argmax(dim=1 )
if patient_result is not None:
_SCREAMING_SNAKE_CASE =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(_a ) ):
patient_counter += 1
else:
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =logits
if patient_counter == self.patience:
break
_SCREAMING_SNAKE_CASE =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
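# Note: PABEE ("Patience-based Early Exit") stops inference once `patience`
# consecutive internal classifiers agree. For example, with patience=2 and
# per-layer argmax predictions [2, 5, 5, 5], the loop above exits after the
# fourth layer, because two successive layers confirmed the prediction 5.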
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCamelCase__ , )
class A__ ( UpperCamelCase__ ):
def __init__( self : Optional[int] , _a : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(_a )
_SCREAMING_SNAKE_CASE =config.num_labels
_SCREAMING_SNAKE_CASE =BertModelWithPabee(_a )
_SCREAMING_SNAKE_CASE =nn.Dropout(config.hidden_dropout_prob )
_SCREAMING_SNAKE_CASE =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(_a )
def __UpperCamelCase ( self : List[str] , _a : Optional[Any]=None , _a : List[Any]=None , _a : Union[str, Any]=None , _a : List[str]=None , _a : Dict=None , _a : Optional[Any]=None , _a : Optional[Any]=None , ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.bert(
input_ids=_a , attention_mask=_a , token_type_ids=_a , position_ids=_a , head_mask=_a , inputs_embeds=_a , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_SCREAMING_SNAKE_CASE =(logits[-1],)
if labels is not None:
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =0
for ix, logits_item in enumerate(_a ):
if self.num_labels == 1:
# We are doing regression
_SCREAMING_SNAKE_CASE =MSELoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
_SCREAMING_SNAKE_CASE =CrossEntropyLoss()
_SCREAMING_SNAKE_CASE =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
_SCREAMING_SNAKE_CASE =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_SCREAMING_SNAKE_CASE =(total_loss / total_weights,) + outputs
        return outputs
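# Note: the objective above is a weighted average of the per-layer classifier
# losses, with the i-th classifier (1-indexed) weighted by i so that deeper
# exits contribute more:
#     total_loss = sum(i * loss_i for i in 1..L) / sum(i for i in 1..L)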
import argparse
snake_case_ : int = '''docs/source/_static/js/custom.js'''
def update_custom_js(version):
    with open(snake_case_ ,encoding='''utf-8''' ,newline='''\n''') as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('''const stableVersion ='''):
        index += 1
    lines[index] = f"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith('''const versionMapping = {'''):
        index += 1
    # We go until the end
    while not lines[index].startswith('''}'''):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f"    \"v{version}\": \"v{version}\",\n"
    with open(snake_case_ ,'''w''' ,encoding='''utf-8''' ,newline='''\n''') as f:
        f.writelines(lines)
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
snake_case_ : Union[str, Any] = parser.parse_args()
    update_custom_js(args.version)
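# Illustration (hedged -- the script's file name is not given in this snippet and
# the version number is invented): invoked from the repository root during a release,
#     python <script>.py --version 4.28.0
# conceptually rewrites two regions of docs/source/_static/js/custom.js:
#     const stableVersion = "v4.28.0"
#     const versionMapping = {
#         ...
#         "v4.28.0": "v4.28.0",
#     }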
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_table_transformer'''] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    snake_case_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
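# A minimal usage sketch: with the lazy module in place the classes resolve on first
# attribute access; "microsoft/table-transformer-detection" is the public checkpoint.
#     from transformers import TableTransformerForObjectDetection
#     model = TableTransformerForObjectDetection.from_pretrained("microsoft/table-transformer-detection")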
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class A__ ( unittest.TestCase ):
UpperCAmelCase = MODEL_FOR_CAUSAL_LM_MAPPING
UpperCAmelCase = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' , do_sample=_a )
self.assertEqual(
_a , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
_SCREAMING_SNAKE_CASE =text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_a , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{'''generated_token_ids''': ANY(_a )},
{'''generated_token_ids''': ANY(_a )},
] , )
_SCREAMING_SNAKE_CASE =text_generator.model.config.eos_token_id
_SCREAMING_SNAKE_CASE ='''<pad>'''
_SCREAMING_SNAKE_CASE =text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{'''generated_token_ids''': ANY(_a )},
{'''generated_token_ids''': ANY(_a )},
],
[
{'''generated_token_ids''': ANY(_a )},
{'''generated_token_ids''': ANY(_a )},
],
] , )
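    # Note: with `return_tensors=True` the pipeline returns token ids under
    # "generated_token_ids" instead of decoded text, which is what the ANY(...)
    # assertions above verify; `num_return_sequences=2` yields two dicts per prompt.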
@require_tf
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' , do_sample=_a )
self.assertEqual(
_a , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
_SCREAMING_SNAKE_CASE =text_generator(['''This is a test''', '''This is a second test'''] , do_sample=_a )
self.assertEqual(
_a , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
def __UpperCamelCase ( self : Union[str, Any] , _a : Tuple , _a : Optional[Any] , _a : List[str] ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =TextGenerationPipeline(model=_a , tokenizer=_a )
return text_generator, ["This is a test", "Another test"]
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''Hello I believe in'''
_SCREAMING_SNAKE_CASE =pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
_SCREAMING_SNAKE_CASE =text_generator(_a )
self.assertEqual(
_a , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
_SCREAMING_SNAKE_CASE =text_generator(_a , stop_sequence=''' fe''' )
self.assertEqual(_a , [{'''generated_text''': '''Hello I believe in fe'''}] )
def __UpperCamelCase ( self : Dict , _a : str , _a : Any ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =text_generator.model
_SCREAMING_SNAKE_CASE =text_generator.tokenizer
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' )
self.assertEqual(_a , [{'''generated_text''': ANY(_a )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' , return_full_text=_a )
self.assertEqual(_a , [{'''generated_text''': ANY(_a )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_SCREAMING_SNAKE_CASE =pipeline(task='''text-generation''' , model=_a , tokenizer=_a , return_full_text=_a )
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' )
self.assertEqual(_a , [{'''generated_text''': ANY(_a )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' , return_full_text=_a )
self.assertEqual(_a , [{'''generated_text''': ANY(_a )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
_SCREAMING_SNAKE_CASE =text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
] , )
if text_generator.tokenizer.pad_token is not None:
_SCREAMING_SNAKE_CASE =text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
[{'''generated_text''': ANY(_a )}, {'''generated_text''': ANY(_a )}],
] , )
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =text_generator('''test''' , return_full_text=_a , return_text=_a )
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =text_generator('''test''' , return_full_text=_a , return_tensors=_a )
with self.assertRaises(_a ):
_SCREAMING_SNAKE_CASE =text_generator('''test''' , return_text=_a , return_tensors=_a )
        # Empty prompt is slightly special:
        # it requires a BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
_SCREAMING_SNAKE_CASE =text_generator('''''' )
self.assertEqual(_a , [{'''generated_text''': ANY(_a )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
_SCREAMING_SNAKE_CASE =text_generator('''''' )
if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, so we skip these tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
_SCREAMING_SNAKE_CASE =['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
_SCREAMING_SNAKE_CASE =text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_a ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
import torch
# Classic `model_kwargs`
_SCREAMING_SNAKE_CASE =pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_SCREAMING_SNAKE_CASE =pipe('''This is a test''' )
self.assertEqual(
_a , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
_SCREAMING_SNAKE_CASE =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
_SCREAMING_SNAKE_CASE =pipe('''This is a test''' )
self.assertEqual(
_a , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
_SCREAMING_SNAKE_CASE =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
_SCREAMING_SNAKE_CASE =pipe('''This is a test''' )
self.assertEqual(
_a , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
import torch
_SCREAMING_SNAKE_CASE =pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=_a , top_p=0.5 )
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE ='''Hello world'''
_SCREAMING_SNAKE_CASE =pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
if text_generator.model.framework == "tf":
_SCREAMING_SNAKE_CASE =logging.get_logger('''transformers.generation.tf_utils''' )
else:
_SCREAMING_SNAKE_CASE =logging.get_logger('''transformers.generation.utils''' )
        _SCREAMING_SNAKE_CASE ='''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(_a ) as cl:
_SCREAMING_SNAKE_CASE =text_generator(_a , max_length=10 , max_new_tokens=1 )
self.assertIn(_a , cl.out )
# The user only sets one -> no warning
with CaptureLogger(_a ) as cl:
_SCREAMING_SNAKE_CASE =text_generator(_a , max_new_tokens=1 )
self.assertNotIn(_a , cl.out )
with CaptureLogger(_a ) as cl:
_SCREAMING_SNAKE_CASE =text_generator(_a , max_length=10 )
        self.assertNotIn(_a , cl.out )
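# A minimal usage sketch of the API exercised above, with the same tiny test
# checkpoint (a randomly initialized model, so the generated text is gibberish by design).
if __name__ == "__main__":
    generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''')
    print(generator('''Hello I believe in''' , do_sample=False , max_new_tokens=5)[0]['''generated_text'''])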
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =(3, 32, 128)
_SCREAMING_SNAKE_CASE =tempfile.mkdtemp()
# fmt: off
_SCREAMING_SNAKE_CASE =['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
_SCREAMING_SNAKE_CASE ={
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
_SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_a , _a )
def __UpperCamelCase ( self : Optional[Any] , **_a : str ) -> int:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Optional[int] , **_a : Tuple ) -> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __UpperCamelCase ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
_SCREAMING_SNAKE_CASE =Image.fromarray(np.moveaxis(_a , 0 , -1 ) )
return image_input
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
_SCREAMING_SNAKE_CASE =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a , padding_value=1.0 )
_SCREAMING_SNAKE_CASE =MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' )
_SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =processor(text=_a )
_SCREAMING_SNAKE_CASE =tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE ='''test'''
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __UpperCamelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_SCREAMING_SNAKE_CASE =processor.char_decode(_a )
_SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a )
_SCREAMING_SNAKE_CASE =[seq.replace(''' ''' , '''''' ) for seq in decoded_tok]
self.assertListEqual(_a , _a )
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =self.prepare_image_inputs()
_SCREAMING_SNAKE_CASE =processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.get_image_processor()
_SCREAMING_SNAKE_CASE =self.get_tokenizer()
_SCREAMING_SNAKE_CASE =MgpstrProcessor(tokenizer=_a , image_processor=_a )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 38 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 5_0257 )
_SCREAMING_SNAKE_CASE =torch.randn(1 , 27 , 3_0522 )
_SCREAMING_SNAKE_CASE =processor.batch_decode([char_input, bpe_input, wp_input] )
        self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
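# A minimal end-to-end sketch for the processor under test, assuming the public
# MGP-STR checkpoint "alibaba-damo/mgp-str-base" and a PIL `image` already loaded:
#     from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#     processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#     model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     outputs = model(pixel_values)
#     text = processor.batch_decode(outputs.logits)["generated_text"]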