| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86-54.5k | int64 0-371 | stringlengths 87-49.2k | int64 0-349 | int64 0-1 |
"""simple docstring"""
from math import sqrt
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = 0
for i in range(1 , int(sqrt(lowercase__ ) + 1 ) ):
if n % i == 0 and i != sqrt(lowercase__ ):
total += i + n // i
elif i == sqrt(lowercase__ ):
total += i
return total - n
def _snake_case ( lowercase__ : int = 1_0_0_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = sum(
i
for i in range(1 , lowercase__ )
if sum_of_divisors(sum_of_divisors(lowercase__ ) ) == i and sum_of_divisors(lowercase__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
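
# Sanity check with the classic amicable pair (220, 284), for illustration:
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220,
# so solution(300) returns 220 + 284 == 504.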
| 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
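
# Hypothetical usage sketch (variable names assumed, not part of this module):
#   decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#   tokens, lengths = decoder.generate_captions(clip_features, eos_token_id=50256, device="cuda")
# Each row of `tokens` is one beam-searched caption, ordered by average log-probability.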
| 1 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Dict = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCAmelCase_ :List[Any] = get_activation("""gelu""" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Dict = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCAmelCase_ :List[str] = get_activation("""gelu""" )
lowerCAmelCase_ :Optional[int] = get_activation("""gelu_10""" )
lowerCAmelCase_ :Tuple = torch_builtin(__A )
lowerCAmelCase_ :Optional[int] = geluaa(__A )
lowerCAmelCase_ :str = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(__A ):
get_activation("""bogus""" )
with self.assertRaises(__A ):
get_activation(__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[Any] = get_activation("""gelu""" )
lowerCAmelCase_ :List[str] = 1
lowerCAmelCase_ :List[Any] = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
lowerCAmelCase_ :Union[str, Any] = acta.a
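
# Illustration of the clipping behaviour tested above (values approximate):
# get_activation("gelu_10")(torch.tensor([100.0])) caps the output at 10.0,
# while the plain GELU would return ~100.0 for the same input.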
| 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
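
# Illustrative only (values assumed): overriding a couple of defaults.
#   config = DetrConfig(num_queries=50, decoder_layers=3)
#   config.num_attention_heads  # -> 8, resolved via `attribute_map` to encoder_attention_heads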
| 1 | 1 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : int ) -> float:
'''simple docstring'''
lowerCAmelCase_ :int = x
lowerCAmelCase_ :str = y
for step in range(lowercase__ ): # noqa: B007
lowerCAmelCase_ :int = a * a - b * b + x
lowerCAmelCase_ :int = 2 * a * b + y
lowerCAmelCase_ :Optional[Any] = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
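
# For example, get_distance(0, 0, 50) never diverges and returns 1.0, while
# get_distance(1, 1, 50) exceeds the escape radius immediately and returns 0.0.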
def get_black_and_white_rgb(distance: float) -> tuple:
    """Points inside the Mandelbrot set (distance == 1) are black, the rest white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Map the escape distance to a hue; points inside the set stay black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x=-0.6, figure_center_y=-0.4,
    #                 figure_width=0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding=False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
| 1 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
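
# Rolling-hash intuition with a 3-character window over base B = alphabet_size:
# hash("abc") = ((ord('a')*B + ord('b'))*B + ord('c')) mod M.  Sliding to "bcd"
# subtracts ord('a')*B**2 (modulus_power holds B**(p_len-1)), multiplies by B,
# and adds ord('d') -- exactly the O(1) update performed in the loop above.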
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : bool = False ) -> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(lowercase__ ), magnitude * sin(lowercase__ )]
return [magnitude * cos(radians(lowercase__ ) ), magnitude * sin(radians(lowercase__ ) )]
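
# e.g. polar_force(10, 90) ~= [0.0, 10.0]: a 10 N force pointing straight up
# has no x-component (cos 90 deg = 0) and a full y-component (sin 90 deg = 1).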
def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """Check if a system of forces is in static equilibrium (net moment ~ 0)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
| 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :str = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
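
# Hypothetical usage sketch (variable names assumed; requires the `vision` backend):
#   tool = ImageSegmentationTool()
#   mask = tool(image=pil_image, label="a cat")  # returns a binary PIL mask image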
| 1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
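
# Quick illustration of the dotted-key lookup (toy config dict assumed):
#   ds = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
#   ds.get_value("zero_optimization.stage")  # -> 3
#   ds.is_zero3()                            # -> True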
class DeepSpeedEngineWrapper:
    """Internal wrapper around a DeepSpeed engine."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    """Internal wrapper around a DeepSpeed optimizer."""

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    """Internal wrapper around a DeepSpeed scheduler."""

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer: the real one is created from the DeepSpeed config file."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler: the real one is created from the DeepSpeed config file."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 1 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[int] = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__A ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__A ) )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase_ :Optional[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase_ :List[str] = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
# pass variant but use the non-variant filenames
lowerCAmelCase_ :List[str] = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
lowerCAmelCase_ :int = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Tuple = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
lowerCAmelCase_ :Tuple = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
lowerCAmelCase_ :Optional[Any] = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def __lowerCAmelCase ( self ) -> List[Any]:
# pass variant but use the non-variant filenames
lowerCAmelCase_ :Union[str, Any] = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
lowerCAmelCase_ :int = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# Removed: 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
lowerCAmelCase_ :List[Any] = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
| 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
# threshold the logits into a binary mask
array[array <= 0] = 0
array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
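# Usage sketch (hedged: assumes the transformers-agents `load_tool` API and the tool id
# "image-segmentation"; `image` stands for any PIL image you provide):
#
#     from transformers import load_tool
#     segmenter = load_tool("image-segmentation")
#     mask = segmenter(image, "cat")  # PIL image holding the 0/255 binary mask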
| 1 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Any = ["input_features", "is_longer"]
def __init__( self , __A=64 , __A=4_8000 , __A=480 , __A=10 , __A=1024 , __A=0.0 , __A=False , __A = 0 , __A = 1_4000 , __A = None , __A = "fusion" , __A = "repeatpad" , **__A , ) -> int:
super().__init__(
feature_size=__A , sampling_rate=__A , padding_value=__A , return_attention_mask=__A , **__A , )
lowerCAmelCase_ :Optional[int] = top_db
lowerCAmelCase_ :str = truncation
lowerCAmelCase_ :Any = padding
lowerCAmelCase_ :int = fft_window_size
lowerCAmelCase_ :int = (fft_window_size >> 1) + 1
lowerCAmelCase_ :Any = hop_length
lowerCAmelCase_ :int = max_length_s
lowerCAmelCase_ :Dict = max_length_s * sampling_rate
lowerCAmelCase_ :str = sampling_rate
lowerCAmelCase_ :int = frequency_min
lowerCAmelCase_ :Optional[int] = frequency_max
lowerCAmelCase_ :int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__A , min_frequency=__A , max_frequency=__A , sampling_rate=__A , norm=__A , mel_scale="""htk""" , )
lowerCAmelCase_ :List[str] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=__A , min_frequency=__A , max_frequency=__A , sampling_rate=__A , norm="""slaney""" , mel_scale="""slaney""" , )
def __lowerCAmelCase ( self ) -> Dict[str, Any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ :Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def __lowerCAmelCase ( self , __A , __A = None ) -> np.ndarray:
lowerCAmelCase_ :Tuple = spectrogram(
__A , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=__A , log_mel="""dB""" , )
return log_mel_spectrogram.T
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :int = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
ranges[1] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
ranges[2] = [0]
# randomly choose index for each part
lowerCAmelCase_ :Tuple = np.random.choice(ranges[0] )
lowerCAmelCase_ :List[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ :str = np.random.choice(ranges[2] )
lowerCAmelCase_ :str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ :List[Any] = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ :int = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ :Union[str, Any] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ :Dict = torch.nn.functional.interpolate(
__A , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=__A )
lowerCAmelCase_ :List[str] = mel_shrink[0][0].numpy()
lowerCAmelCase_ :Optional[Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def __lowerCAmelCase ( self , __A , __A , __A , __A ) -> np.array:
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ :Union[str, Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ :List[Any] = len(__A ) - max_length
lowerCAmelCase_ :Any = np.random.randint(0 , overflow + 1 )
lowerCAmelCase_ :Optional[int] = waveform[idx : idx + max_length]
lowerCAmelCase_ :Optional[Any] = self._np_extract_fbank_features(__A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ :List[str] = self._np_extract_fbank_features(__A , self.mel_filters )
lowerCAmelCase_ :Dict = max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
lowerCAmelCase_ :Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ :Optional[int] = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCAmelCase_ :Dict = False
else:
lowerCAmelCase_ :Tuple = self._random_mel_fusion(__A , __A , __A )
lowerCAmelCase_ :Dict = True
else:
raise NotImplementedError(f"""data_truncating {truncation} not implemented""" )
else:
lowerCAmelCase_ :Union[str, Any] = False
# "repeat" is only used as an additional padding mode: the audio is repeated before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ :Optional[Any] = int(max_length / len(__A ) )
lowerCAmelCase_ :List[Any] = np.stack(np.tile(__A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ :Optional[int] = int(max_length / len(__A ) )
lowerCAmelCase_ :int = np.stack(np.tile(__A , __A ) )
lowerCAmelCase_ :List[Any] = np.pad(__A , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ :int = self._np_extract_fbank_features(__A , self.mel_filters )
lowerCAmelCase_ :Any = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCAmelCase_ :int = self._np_extract_fbank_features(__A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , **__A , ) -> BatchFeature:
lowerCAmelCase_ :Union[str, Any] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ :Optional[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
lowerCAmelCase_ :Optional[Any] = isinstance(__A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowerCAmelCase_ :Optional[Any] = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ :int = [np.asarray(__A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
lowerCAmelCase_ :List[str] = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ :int = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ :Optional[int] = [np.asarray(__A )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ :Tuple = [
self._get_input_mel(__A , max_length if max_length else self.nb_max_samples , __A , __A )
for waveform in raw_speech
]
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Tuple = []
for mel, longer in padded_inputs:
input_mel.append(__A )
is_longer.append(__A )
if truncation == "fusion" and sum(__A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ :str = np.random.randint(0 , len(__A ) )
is_longer[rand_idx] = True
if isinstance(input_mel[0] , __A ):
lowerCAmelCase_ :str = [np.asarray(__A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ :Dict = [[longer] for longer in is_longer]
lowerCAmelCase_ :List[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
lowerCAmelCase_ :Tuple = BatchFeature(__A )
if return_tensors is not None:
lowerCAmelCase_ :Optional[int] = input_features.convert_to_tensors(__A )
return input_features
| 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
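# Worked example (hedged: uses the original recursive name `knapsack`). With weights
# [1, 2, 4, 5], values [5, 4, 8, 6] and capacity 5, the best choice is the items of
# weight 1 and 4, worth 5 + 8 = 13:
#
#     knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)  # -> 13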
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[Any] = XLNetTokenizer
UpperCAmelCase_ :str = XLNetTokenizerFast
UpperCAmelCase_ :Optional[Any] = True
UpperCAmelCase_ :List[Any] = True
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ :Optional[int] = XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[Any] = """<s>"""
lowerCAmelCase_ :List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__A ) , 1006 )
def __lowerCAmelCase ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[Any] = XLNetTokenizer(__A , keep_accents=__A )
lowerCAmelCase_ :int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
lowerCAmelCase_ :Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :Any = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[str] = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
lowerCAmelCase_ :Any = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
lowerCAmelCase_ :Dict = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
lowerCAmelCase_ :int = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# fmt: off
lowerCAmelCase_ :Union[str, Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index != 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
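# Usage sketch (hedged: assumes this wrapper is exported as `accelerate.utils.tqdm`).
# Note that the first positional argument is `main_process_only`:
#
#     from accelerate.utils import tqdm
#     for batch in tqdm(True, dataloader):   # progress bar on the local main process only
#         ...
#     for batch in tqdm(False, dataloader):  # progress bar on every process
#         ...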
| 1 | 1 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = checkpoint
lowerCAmelCase_ :Any = {}
new_checkpoint["""encoder.conv_in.weight"""] = vae_state_dict["""encoder.conv_in.weight"""]
new_checkpoint["""encoder.conv_in.bias"""] = vae_state_dict["""encoder.conv_in.bias"""]
new_checkpoint["""encoder.conv_out.weight"""] = vae_state_dict["""encoder.conv_out.weight"""]
new_checkpoint["""encoder.conv_out.bias"""] = vae_state_dict["""encoder.conv_out.bias"""]
new_checkpoint["""encoder.conv_norm_out.weight"""] = vae_state_dict["""encoder.norm_out.weight"""]
new_checkpoint["""encoder.conv_norm_out.bias"""] = vae_state_dict["""encoder.norm_out.bias"""]
new_checkpoint["""decoder.conv_in.weight"""] = vae_state_dict["""decoder.conv_in.weight"""]
new_checkpoint["""decoder.conv_in.bias"""] = vae_state_dict["""decoder.conv_in.bias"""]
new_checkpoint["""decoder.conv_out.weight"""] = vae_state_dict["""decoder.conv_out.weight"""]
new_checkpoint["""decoder.conv_out.bias"""] = vae_state_dict["""decoder.conv_out.bias"""]
new_checkpoint["""decoder.conv_norm_out.weight"""] = vae_state_dict["""decoder.norm_out.weight"""]
new_checkpoint["""decoder.conv_norm_out.bias"""] = vae_state_dict["""decoder.norm_out.bias"""]
new_checkpoint["""quant_conv.weight"""] = vae_state_dict["""quant_conv.weight"""]
new_checkpoint["""quant_conv.bias"""] = vae_state_dict["""quant_conv.bias"""]
new_checkpoint["""post_quant_conv.weight"""] = vae_state_dict["""post_quant_conv.weight"""]
new_checkpoint["""post_quant_conv.bias"""] = vae_state_dict["""post_quant_conv.bias"""]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ :int = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """encoder.down""" in layer} )
lowerCAmelCase_ :str = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(lowercase__ )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ :Union[str, Any] = len({""".""".join(layer.split(""".""" )[:3] ) for layer in vae_state_dict if """decoder.up""" in layer} )
lowerCAmelCase_ :str = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(lowercase__ )
}
for i in range(lowercase__ ):
lowerCAmelCase_ :Optional[Any] = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.weight"""] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
new_checkpoint[f"""encoder.down_blocks.{i}.downsamplers.0.conv.bias"""] = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
lowerCAmelCase_ :Tuple = renew_vae_resnet_paths(lowercase__ )
lowerCAmelCase_ :Tuple = {"""old""": f"""down.{i}.block""", """new""": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
lowerCAmelCase_ :Optional[int] = [key for key in vae_state_dict if """encoder.mid.block""" in key]
lowerCAmelCase_ :int = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ :List[Any] = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
lowerCAmelCase_ :Any = renew_vae_resnet_paths(lowercase__ )
lowerCAmelCase_ :Optional[int] = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
lowerCAmelCase_ :List[Any] = [key for key in vae_state_dict if """encoder.mid.attn""" in key]
lowerCAmelCase_ :List[Any] = renew_vae_attention_paths(lowercase__ )
lowerCAmelCase_ :Optional[Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
conv_attn_to_linear(lowercase__ )
for i in range(lowercase__ ):
lowerCAmelCase_ :Optional[int] = num_up_blocks - 1 - i
lowerCAmelCase_ :Any = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.weight"""] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
new_checkpoint[f"""decoder.up_blocks.{i}.upsamplers.0.conv.bias"""] = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
lowerCAmelCase_ :Optional[Any] = renew_vae_resnet_paths(lowercase__ )
lowerCAmelCase_ :int = {"""old""": f"""up.{block_id}.block""", """new""": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
lowerCAmelCase_ :List[Any] = [key for key in vae_state_dict if """decoder.mid.block""" in key]
lowerCAmelCase_ :Optional[Any] = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ :Union[str, Any] = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
lowerCAmelCase_ :Any = renew_vae_resnet_paths(lowercase__ )
lowerCAmelCase_ :Dict = {"""old""": f"""mid.block_{i}""", """new""": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
lowerCAmelCase_ :Union[str, Any] = [key for key in vae_state_dict if """decoder.mid.attn""" in key]
lowerCAmelCase_ :List[str] = renew_vae_attention_paths(lowercase__ )
lowerCAmelCase_ :Optional[Any] = {"""old""": """mid.attn_1""", """new""": """mid_block.attentions.0"""}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
conv_attn_to_linear(lowercase__ )
return new_checkpoint
def _snake_case ( lowercase__ : str , lowercase__ : str , ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = requests.get(
""" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml""" )
lowerCAmelCase_ :List[str] = io.BytesIO(r.content )
lowerCAmelCase_ :str = OmegaConf.load(lowercase__ )
lowerCAmelCase_ :Tuple = 5_1_2
lowerCAmelCase_ :Optional[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
if checkpoint_path.endswith("""safetensors""" ):
from safetensors import safe_open
lowerCAmelCase_ :str = {}
with safe_open(lowercase__ , framework="""pt""" , device="""cpu""" ) as f:
for key in f.keys():
checkpoint[key] = f.get_tensor(key)
else:
lowerCAmelCase_ :Union[str, Any] = torch.load(lowercase__ , map_location=lowercase__ )["""state_dict"""]
# Convert the VAE model.
lowerCAmelCase_ :int = create_vae_diffusers_config(lowercase__ , image_size=lowercase__ )
lowerCAmelCase_ :List[str] = custom_convert_ldm_vae_checkpoint(lowercase__ , lowercase__ )
lowerCAmelCase_ :Dict = AutoencoderKL(**lowercase__ )
vae.load_state_dict(lowercase__ )
vae.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
__UpperCAmelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
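# Example invocation (hedged: the script filename and both paths are hypothetical):
#     python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers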
| 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use the local one
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 1 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _snake_case ( lowercase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Tuple = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def _snake_case ( lowercase__ : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ :Dict = emb.weight.shape
lowerCAmelCase_ :Dict = nn.Linear(lowercase__ , lowercase__ , bias=lowercase__ )
lin_layer.weight.data = emb.weight.data
return lin_layer
def _snake_case ( lowercase__ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = torch.load(lowercase__ , map_location="""cpu""" )
lowerCAmelCase_ :int = Namespace(**checkpoint["""cfg"""]["""model"""] )
lowerCAmelCase_ :Optional[Any] = checkpoint["""model"""]
remove_ignore_keys_(lowercase__ )
lowerCAmelCase_ :int = state_dict["""decoder.embed_tokens.weight"""].shape[0]
lowerCAmelCase_ :List[str] = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()}
lowerCAmelCase_ :int = XGLMConfig(
vocab_size=lowercase__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowerCAmelCase_ :Union[str, Any] = XGLMForCausalLM(lowercase__ )
lowerCAmelCase_ :int = model.load_state_dict(lowercase__ , strict=lowercase__ )
print(lowercase__ )
model.lm_head = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
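# Example invocation (hedged: the script filename and paths are hypothetical; both
# arguments are positional, as declared by the parser above):
#     python convert_xglm_checkpoint.py /path/to/model.pt ./xglm-converted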
| 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[Any] = KandinskyVaaControlnetPipeline
UpperCAmelCase_ :Union[str, Any] = ["image_embeds", "negative_image_embeds", "hint"]
UpperCAmelCase_ :str = ["image_embeds", "negative_image_embeds", "hint"]
UpperCAmelCase_ :Union[str, Any] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase_ :List[Any] = False
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return 32
@property
def __lowerCAmelCase ( self ) -> Tuple:
return 32
@property
def __lowerCAmelCase ( self ) -> int:
return self.time_input_dim
@property
def __lowerCAmelCase ( self ) -> Any:
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self ) -> int:
return 100
@property
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = {
"""in_channels""": 8,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowerCAmelCase_ :Tuple = UNetaDConditionModel(**__A )
return model
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = self.dummy_unet
lowerCAmelCase_ :List[Any] = self.dummy_movq
lowerCAmelCase_ :int = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__A , )
lowerCAmelCase_ :Optional[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
lowerCAmelCase_ :Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__A )
# create hint
lowerCAmelCase_ :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :List[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[str] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :str = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Dict = """cpu"""
lowerCAmelCase_ :int = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
lowerCAmelCase_ :Union[str, Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[int] = pipe(**self.get_dummy_inputs(__A ) )
lowerCAmelCase_ :Union[str, Any] = output.images
lowerCAmelCase_ :int = pipe(
**self.get_dummy_inputs(__A ) , return_dict=__A , )[0]
lowerCAmelCase_ :Optional[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ :Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ :Optional[Any] = np.array(
[0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
lowerCAmelCase_ :Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
lowerCAmelCase_ :Optional[Any] = torch.from_numpy(np.array(__A ) ).float() / 2_5_5.0
lowerCAmelCase_ :str = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
lowerCAmelCase_ :List[str] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__A )
lowerCAmelCase_ :List[str] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
lowerCAmelCase_ :Optional[Any] = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Dict = """A robot, 4k photo"""
lowerCAmelCase_ :Union[str, Any] = torch.Generator(device="""cuda""" ).manual_seed(0 )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = pipe_prior(
__A , generator=__A , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowerCAmelCase_ :Tuple = torch.Generator(device="""cuda""" ).manual_seed(0 )
lowerCAmelCase_ :Dict = pipeline(
image_embeds=__A , negative_image_embeds=__A , hint=__A , generator=__A , num_inference_steps=100 , output_type="""np""" , )
lowerCAmelCase_ :Any = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__A , __A )
| 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
__UpperCAmelCase = dataset.iloc[:, 1:2].values
__UpperCAmelCase = dataset.iloc[:, 2].values
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
__UpperCAmelCase = PolynomialFeatures(degree=4)
__UpperCAmelCase = poly_reg.fit_transform(X)
__UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
def _snake_case ( ) -> str:
'''simple docstring'''
plt.scatter(lowercase__ , lowercase__ , color="""red""" )
plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Polynomial Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[str] = (UnCLIPScheduler,)
def __lowerCAmelCase ( self , **__A ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**__A )
return config
def __lowerCAmelCase ( self ) -> Dict:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __lowerCAmelCase ( self ) -> str:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def __lowerCAmelCase ( self ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def __lowerCAmelCase ( self ) -> int:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def __lowerCAmelCase ( self ) -> Any:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
scheduler_class = self.scheduler_classes[0]
config = self.get_scheduler_config(variance_type="""fixed_small_log""" )
scheduler = scheduler_class(**config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def __lowerCAmelCase ( self ) -> Any:
scheduler_class = self.scheduler_classes[0]
config = self.get_scheduler_config(variance_type="""learned_range""" )
scheduler = scheduler_class(**config )
lowerCAmelCase_ :Union[str, Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__A ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.0_0_1_0_0_1_1 < 1E-5
def __lowerCAmelCase ( self ) -> str:
scheduler_class = self.scheduler_classes[0]
config = self.get_scheduler_config()
scheduler = scheduler_class(**config )
timesteps = scheduler.timesteps
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for i, t in enumerate(timesteps ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
scheduler_class = self.scheduler_classes[0]
config = self.get_scheduler_config()
scheduler = scheduler_class(**config )
scheduler.set_timesteps(25 )
timesteps = scheduler.timesteps
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for i, t in enumerate(timesteps ):
# 1. predict noise residual
residual = model(sample , t )
if i + 1 == timesteps.shape[0]:
prev_timestep = None
else:
prev_timestep = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(
residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def __lowerCAmelCase ( self ) -> Tuple:
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
| 1 |
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def _snake_case ( conductivity : float , electron_conc : float , mobility : float , ) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
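# A minimal usage sketch (the values below are illustrative, not from the
# source): exactly one argument is 0, and the function solves for it.
# _snake_case(conductivity=0, electron_conc=1e20, mobility=0.01)
# -> ('conductivity', 0.16021) approximately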
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def _snake_case ( inductance : float , capacitance : float ) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
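# A minimal usage sketch (illustrative values): f = 1 / (2*pi*sqrt(L*C)),
# so inductance=10 and capacitance=5 resonate near 0.0225 Hz.
# _snake_case(10, 5) -> ('Resonant frequency', 0.0225...) approximately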
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester :
def __init__( self , parent , batch_size=13 , seq_length=7 , act_dim=6 , state_dim=17 , hidden_size=23 , max_length=11 , is_training=True , ) -> str:
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.act_dim = act_dim
self.state_dim = state_dim
self.hidden_size = hidden_size
self.max_length = max_length
self.is_training = is_training
def prepare_config_and_inputs ( self ) -> Union[str, Any]:
states = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
rewards = floats_tensor((self.batch_size, self.seq_length, 1) )
returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1) )
timesteps = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
attention_mask = random_attention_mask((self.batch_size, self.seq_length) )
config = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def get_config ( self ) -> Any:
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def create_and_check_model ( self , config , states , actions , rewards , returns_to_go , timesteps , attention_mask , ) -> Optional[Any]:
model = DecisionTransformerModel(config=config )
model.to(__A )
model.eval()
result = model(states , actions , rewards , returns_to_go , timesteps , attention_mask )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modalities: states, returns and actions
def prepare_config_and_inputs_for_common ( self ) -> Optional[Any]:
config_and_inputs = self.prepare_config_and_inputs()
(
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
) = config_and_inputs
inputs_dict = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = (DecisionTransformerModel,) if is_torch_available() else ()
UpperCAmelCase_ :Tuple = ()
UpperCAmelCase_ :Optional[int] = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
UpperCAmelCase_ :str = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
UpperCAmelCase_ :Optional[Any] = False
UpperCAmelCase_ :Union[str, Any] = False
UpperCAmelCase_ :Dict = False
UpperCAmelCase_ :Tuple = False
UpperCAmelCase_ :str = False
UpperCAmelCase_ :Any = False
UpperCAmelCase_ :Optional[Any] = False
UpperCAmelCase_ :Dict = False
UpperCAmelCase_ :List[str] = False
def setUp ( self ) -> Optional[int]:
self.model_tester = DecisionTransformerModelTester(self )
self.config_tester = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :Tuple = DecisionTransformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(__A )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
NUM_STEPS = 2 # number of steps of autoregressive prediction we will perform
TARGET_RETURN = 10 # defined by the RL environment, may be normalized
model = DecisionTransformerModel.from_pretrained("""edbeeching/decision-transformer-gym-hopper-expert""" )
model = model.to(__A )
config = model.config
torch.manual_seed(0 )
state = torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ) # env.reset()
expected_outputs = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=__A )
returns_to_go = torch.tensor(TARGET_RETURN , device=__A , dtype=torch.floataa ).reshape(1 , 1 , 1 )
states = state
actions = torch.zeros(1 , 0 , config.act_dim , device=__A , dtype=torch.floataa )
rewards = torch.zeros(1 , 0 , device=__A , dtype=torch.floataa )
timesteps = torch.tensor(0 , device=__A , dtype=torch.long ).reshape(1 , 1 )
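# Autoregressive rollout: each step appends a zero-padded action slot, runs
# the model on the full history, then feeds the predicted action, a mocked
# environment state and the updated return-to-go back into the inputs.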
for step in range(NUM_STEPS ):
actions = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=__A )] , dim=1 )
rewards = torch.cat([rewards, torch.zeros(1 , 1 , device=__A )] , dim=1 )
attention_mask = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
state_pred , action_pred , return_pred = model(
states=states , actions=actions , rewards=rewards , returns_to_go=returns_to_go , timesteps=timesteps , attention_mask=attention_mask , return_dict=__A , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
state , reward , done , _ = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=__A , dtype=torch.floataa ),
1.0,
False,
{},
)
action = action_pred[0, -1]
states = torch.cat([states, state] , dim=1 )
pred_return = returns_to_go[0, -1] - reward
returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
timesteps = torch.cat(
[timesteps, torch.ones((1, 1) , device=__A , dtype=torch.long ) * (step + 1)] , dim=1 )
| 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase_ :List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
soup = BeautifulSoup(requests.get(url , headers=header ).text )
# Initialize a Pandas dataframe with the column titles
data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
product_title = item.ha.text
product_link = """https://www.amazon.in/""" + item.ha.a["""href"""]
product_price = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
product_rating = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
product_rating = """Not available"""
try:
product_mrp = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
product_mrp = """"""
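# Discount percentage relative to MRP: (MRP - current price) / MRP * 100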
try:
discount = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_0_0 )
except ValueError:
discount = float("""nan""" )
except AttributeError:
pass
data_frame.loc[len(data_frame.index )] = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase_ :List[Any] = """ """
lowerCAmelCase_ :Tuple = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
product = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 1 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def setUp ( self ) -> Optional[int]:
self.checkpoint = """laion/clap-htsat-unfused"""
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def get_feature_extractor ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def tearDown ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 1 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters ( with_config=True ) -> List[str]:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=A__ ) )
class _SCREAMING_SNAKE_CASE ( A__ ):
dataset :Union[str, Any] = None
config_name :Optional[Any] = None
def test_dataset_info_available ( self , dataset , config_name ) -> Dict:
with TemporaryDirectory() as tmp_dir:
dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
builder_cls = import_main_class(dataset_module.module_path , dataset=True )
builder_instance :DatasetBuilder = builder_cls(
cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
url = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=__A ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
dataset_info_path = cached_path(url , cache_dir=tmp_dir )
self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs ( tmp_path_factory ):
'''simple docstring'''
tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_dir )
builder_cls = import_main_class(dataset_module.module_path )
builder_instance :DatasetBuilder = builder_cls(
cache_dir=tmp_dir , config_name="""20220301.frr""" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
builder_instance._download_and_prepare = None
builder_instance.download_and_prepare()
ds = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs ( tmp_path ):
'''simple docstring'''
dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_path )
builder_cls = import_main_class(dataset_module.module_path , dataset=True )
builder_instance :DatasetBuilder = builder_cls(
cache_dir=tmp_path , config_name="""20220301.frr""" , hash=dataset_module.hash , )
ds = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(lowercase__ , lowercase__ )
assert "train" in ds
assert isinstance(ds["""train"""] , lowercase__ )
assert next(iter(ds["""train"""] ) )
| 1 |
"""simple docstring"""
import os
from math import logaa
def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase_ :float = 0
lowerCAmelCase_ :Union[str, Any] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) )
if x * logaa(lowercase__ ) > largest:
lowerCAmelCase_ :Any = x * logaa(lowercase__ )
lowerCAmelCase_ :List[Any] = i + 1
return result
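# The comparison runs in log space: a**x > b**y exactly when
# x*log10(a) > y*log10(b). Illustrative check (not from the source):
# 11*log10(2) ~ 3.311 < 7*log10(3) ~ 3.340, so 3**7 beats 2**11.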
if __name__ == "__main__":
print(solution())
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve ( num : int ) -> list[int]:
'''simple docstring'''
if num <= 0:
message = f"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(message )
sieve = [True] * (num + 1)
prime = []
start = 2
end = int(math.sqrt(num ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(start )
# Set multiples of start to False
for i in range(start * start , num + 1 , start ):
if sieve[i] is True:
sieve[i] = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(j )
return prime
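# A minimal usage sketch:
# prime_sieve(10) -> [2, 3, 5, 7]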
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 1 |
"""simple docstring"""
import itertools
import math
def is_prime ( number : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator ( ):
'''simple docstring'''
num = 2
while True:
if is_prime(num ):
yield num
num += 1
def solution ( nth : int = 1_0_0_0_1 ) -> int:
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
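# A minimal usage sketch: the 6th prime is 13.
# solution(6) -> 13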
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 1 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
warnings = None
try:
import msvcrt
except ImportError:
msvcrt = None
try:
import fcntl
except ImportError:
fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger ( ):
'''simple docstring'''
global _logger
_logger = _logger or logging.getLogger(__name__ )
return _logger
class Timeout ( TimeoutError ):
def __init__( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = lock_file
return None
def __str__( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class _Acquire_ReturnProxy :
def __init__( self , __A ) -> List[Any]:
lowerCAmelCase_ :int = lock
return None
def __enter__( self ) -> Any:
return self.lock
def __exit__( self , exc_type , exc_value , traceback ) -> str:
self.lock.release()
return None
class BaseFileLock :
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> str:
lowerCAmelCase_ :Tuple = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowerCAmelCase_ :Tuple = self.hash_filename_if_too_long(__A , __A )
# The path to the lock file.
lowerCAmelCase_ :int = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowerCAmelCase_ :Optional[Any] = None
# The default timeout value.
lowerCAmelCase_ :Tuple = timeout
# We use this lock primarily for the lock counter.
lowerCAmelCase_ :Optional[int] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowerCAmelCase_ :str = 0
return None
@property
def lock_file ( self ):
return self._lock_file
@property
def timeout ( self ):
return self._timeout
@timeout.setter
def timeout ( self , value ):
self._timeout = float(value )
return None
def _acquire ( self ):
raise NotImplementedError()
def _release ( self ):
raise NotImplementedError()
@property
def is_locked ( self ):
return self._lock_file_fd is not None
def acquire ( self , timeout=None , poll_intervall=0.0_5 ) -> Any:
# Use the default timeout, if no timeout is provided.
if timeout is None:
timeout = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lock_id = id(self )
lock_filename = self._lock_file
start_time = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(__A )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
self._lock_counter = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def release ( self , force=False ) -> List[Any]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lock_id = id(self )
lock_filename = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
self._lock_counter = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self ) -> Optional[int]:
self.acquire()
return self
def __exit__( self , exc_type , exc_value , traceback ) -> str:
self.release()
return None
def __del__( self ) -> str:
self.release(force=__A )
return None
def hash_filename_if_too_long ( self , path , max_length ) -> str:
filename = os.path.basename(path )
if len(filename ) > max_length and max_length > 0:
dirname = os.path.dirname(path )
hashed_filename = str(hash(filename ) )
new_filename = filename[: max_length - len(hashed_filename ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(dirname , new_filename )
else:
return path
class WindowsFileLock ( BaseFileLock ):
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> List[Any]:
from .file_utils import relative_to_absolute_path
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
self._lock_file = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def _acquire ( self ):
lowerCAmelCase_ :Any = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowerCAmelCase_ :List[Any] = os.open(self._lock_file , __A )
except OSError:
pass
else:
try:
msvcrt.locking(__A , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__A )
else:
lowerCAmelCase_ :List[Any] = fd
return None
def _release ( self ):
lowerCAmelCase_ :Optional[int] = self._lock_file_fd
lowerCAmelCase_ :Any = None
msvcrt.locking(__A , msvcrt.LK_UNLCK , 1 )
os.close(__A )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock ( BaseFileLock ):
def __init__( self , lock_file , timeout=-1 , max_filename_length=None ) -> Union[str, Any]:
max_filename_length = os.statvfs(os.path.dirname(lock_file ) ).f_namemax
super().__init__(lock_file , timeout=timeout , max_filename_length=max_filename_length )
def _acquire ( self ):
lowerCAmelCase_ :Optional[int] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowerCAmelCase_ :str = os.open(self._lock_file , __A )
try:
fcntl.flock(__A , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__A )
else:
lowerCAmelCase_ :Any = fd
return None
def _release ( self ):
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
lowerCAmelCase_ :Dict = self._lock_file_fd
lowerCAmelCase_ :Union[str, Any] = None
fcntl.flock(__A , fcntl.LOCK_UN )
os.close(__A )
return None
class SoftFileLock ( BaseFileLock ):
def _acquire ( self ):
lowerCAmelCase_ :Union[str, Any] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowerCAmelCase_ :Dict = os.open(self._lock_file , __A )
except OSError:
pass
else:
lowerCAmelCase_ :str = fd
return None
def _release ( self ):
os.close(self._lock_file_fd )
lowerCAmelCase_ :str = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
FileLock = WindowsFileLock
elif fcntl:
FileLock = UnixFileLock
else:
FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
| 1 |
"""simple docstring"""
def solution ( length : int = 5_0 ) -> int:
'''simple docstring'''
ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
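# Sanity check (worked example from the problem statement, assuming this
# solves Project Euler 114): a row of length 7 admits 17 arrangements,
# i.e. solution(7) -> 17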
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
data :float
left :TreeNode | None = None
right :TreeNode | None = None
def _snake_case ( node : TreeNode | None ) -> bool:
'''simple docstring'''
def is_valid_tree(node : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(node , TreeNode ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(node ):
raise ValueError(
"""Each node should be type of TreeNode and data should be float.""" )
def is_binary_search_tree_recursive_check(
node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , right_bound )
)
return is_binary_search_tree_recursive_check(node , -float("""inf""" ) , float("""inf""" ) )
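# A minimal usage sketch (tree shapes are illustrative):
# _snake_case(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))) -> True
# _snake_case(TreeNode(2.0, TreeNode(3.0))) -> False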
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components ( self ) -> List[str]:
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
controlnet = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
components = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs ( self , device , seed=0 ) -> List[str]:
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uinta(image ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def get_dummy_components ( self ) -> Optional[int]:
torch.manual_seed(0 )
unet = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__A ):
if isinstance(__A , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
controlneta = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
controlneta = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
scheduler = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
vae = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
text_encoder_config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
text_encoder = CLIPTextModel(text_encoder_config )
tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
controlnet = MultiControlNetModel([controlneta, controlneta] )
components = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def get_dummy_inputs ( self , device , seed=0 ) -> str:
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
controlnet_embedder_scale_factor = 2
control_image = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
]
image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
image = Image.fromarray(np.uinta(image ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
pipe = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
pipe = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def tearDown ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
output = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
image = output.images[0]
assert image.shape == (512, 512, 3)
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 1 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger()
@dataclass
class Tracker :
module :nn.Module
traced :List[nn.Module] = field(default_factory=list )
handles :list = field(default_factory=list )
def _forward_hook ( self , m , inputs , outputs ):
has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Convad ) or isinstance(m , nn.BatchNormad )
if has_not_submodules:
self.traced.append(__A )
def __call__( self , __A ) -> Union[str, Any]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__A )
[x.remove() for x in self.handles]
return self
@property
def parametrized ( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda __A : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer :
src :nn.Module
dest :nn.Module
verbose :int = 1
src_skip :List = field(default_factory=list )
dest_skip :List = field(default_factory=list )
raise_if_mismatch :bool = True
def __call__( self , __A ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = Tracker(self.dest )(__A ).parametrized
lowerCAmelCase_ :Tuple = Tracker(self.src )(__A ).parametrized
lowerCAmelCase_ :Optional[Any] = list(filter(lambda __A : type(__A ) not in self.src_skip , __A ) )
lowerCAmelCase_ :List[str] = list(filter(lambda __A : type(__A ) not in self.dest_skip , __A ) )
if len(__A ) != len(__A ) and self.raise_if_mismatch:
raise Exception(
f"""Numbers of operations are different. Source module has {len(__A )} operations while"""
f""" destination module has {len(__A )}.""" )
for dest_m, src_m in zip(__A , __A ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f"""Transfered from={src_m} to={dest_m}""" )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Any:
super().__init__()
feature_blocks :List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("""conv1""", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("""block""" ), f"""Unexpected layer name {k}"""
block_index = len(feature_blocks ) + 1
feature_blocks.append((f"""res{block_index}""", v) )
self._feature_blocks = nn.ModuleDict(feature_blocks )
def __lowerCAmelCase ( self , __A ) -> Any:
return get_trunk_forward_outputs(
__A , out_feat_keys=__A , feature_blocks=self._feature_blocks , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def convert_name_to_timm ( self , x ) -> str:
x_split = x.split("""-""" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , x ) -> Callable[[], Tuple[nn.Module, Dict]]:
# default to timm!
if x not in self:
x = self.convert_name_to_timm(x )
val = partial(lambda: (timm.create_model(x , pretrained=__A ).eval(), None) )
else:
val = super().__getitem__(x )
return val
class _SCREAMING_SNAKE_CASE ( A__ ):
def __getitem__( self , x ) -> Callable[[], nn.Module]:
if "seer" in x and "in1k" not in x:
val = RegNetModel
else:
val = RegNetForImageClassification
return val
def manually_copy_vissl_head ( from_state_dict , to_state_dict , keys : List[Tuple[str, str]] ):
'''simple docstring'''
for from_key, to_key in keys:
to_state_dict[to_key] = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def convert_weight_and_push ( name : str , from_model_func : Callable[[], nn.Module] , our_model_func : Callable[[], nn.Module] , config : RegNetConfig , save_directory : Path , push_to_hub : bool = True , ) -> Optional[Any]:
'''simple docstring'''
print(f"""Converting {name}...""" )
with torch.no_grad():
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = from_model_func()
lowerCAmelCase_ :List[str] = our_model_func(lowercase__ ).eval()
lowerCAmelCase_ :Optional[Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ , raise_if_mismatch=lowercase__ )
lowerCAmelCase_ :Optional[Any] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(lowercase__ )
if from_state_dict is not None:
lowerCAmelCase_ :Union[str, Any] = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
lowerCAmelCase_ :Any = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
lowerCAmelCase_ :Optional[int] = manually_copy_vissl_head(lowercase__ , our_model.state_dict() , lowercase__ )
our_model.load_state_dict(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = our_model(lowercase__ , output_hidden_states=lowercase__ )
lowerCAmelCase_ :Tuple = (
our_outputs.logits if isinstance(lowercase__ , lowercase__ ) else our_outputs.last_hidden_state
)
lowerCAmelCase_ :Optional[int] = from_model(lowercase__ )
lowerCAmelCase_ :List[Any] = from_output[-1] if type(lowercase__ ) is list else from_output
# now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
lowerCAmelCase_ :List[str] = our_outputs.hidden_states[-1]
assert torch.allclose(lowercase__ , lowercase__ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=lowercase__ , )
size = 2_2_4 if """seer""" not in name else 3_8_4
# we can use the convnext one
image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=size )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , )
print(f"""Pushed {name}""" )
def convert_weights_and_push ( save_directory : Path , model_name : str = None , push_to_hub : bool = True ) -> List[str]:
'''simple docstring'''
filename = """imagenet-1k-id2label.json"""
num_labels = 1_0_0_0
expected_shape = (1, num_labels)
repo_id = """huggingface/label-files"""
num_labels = num_labels
idalabel = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="""dataset""" ) ) , """r""" ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
idalabel = idalabel
labelaid = {v: k for k, v in idalabel.items()}
ImageNetPreTrainedConfig = partial(RegNetConfig , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )
lowerCAmelCase_ :Any = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 1_2] , hidden_sizes=[3_2, 6_4, 1_6_0, 3_8_4] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[4_8, 9_6, 2_4_0, 5_2_8] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[6_4, 1_2_8, 2_8_8, 6_7_2] , groups_width=1_6 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 2] , hidden_sizes=[7_2, 1_6_8, 4_0_8, 9_1_2] , groups_width=2_4 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_5, 2] , hidden_sizes=[9_6, 1_9_2, 4_3_2, 1_0_0_8] , groups_width=4_8 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_4, 2] , hidden_sizes=[8_0, 2_4_0, 5_6_0, 1_3_6_0] , groups_width=4_0 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 3_9_2, 7_8_4, 1_6_2_4] , groups_width=5_6 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_5, 1] , hidden_sizes=[8_0, 2_4_0, 7_2_0, 1_9_2_0] , groups_width=1_2_0 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_3, 1] , hidden_sizes=[2_5_6, 5_1_2, 8_9_6, 2_0_4_8] , groups_width=1_2_8 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_3, 1] , hidden_sizes=[3_3_6, 6_7_2, 1_3_4_4, 2_5_2_0] , groups_width=1_6_8 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[2_4, 5_6, 1_5_2, 3_6_8] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[4_8, 1_0_4, 2_0_8, 4_4_0] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[4_8, 1_1_2, 2_5_6, 6_0_8] , groups_width=1_6 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[6_4, 1_2_8, 3_2_0, 7_6_8] , groups_width=1_6 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_7, 2] , hidden_sizes=[4_8, 1_2_0, 3_3_6, 8_8_8] , groups_width=2_4 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_3, 1] , hidden_sizes=[7_2, 2_1_6, 5_7_6, 1_5_1_2] , groups_width=2_4 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 1_2, 2] , hidden_sizes=[1_2_8, 1_9_2, 5_1_2, 1_0_8_8] , groups_width=6_4 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_4, 2] , hidden_sizes=[1_4_4, 2_8_8, 5_7_6, 1_2_9_6] , groups_width=7_2 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_0, 1] , hidden_sizes=[1_6_8, 4_4_8, 8_9_6, 2_0_1_6] , groups_width=5_6 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 8_9_6, 2_2_4_0] , groups_width=1_1_2 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 1_1, 1] , hidden_sizes=[2_2_4, 4_4_8, 1_2_3_2, 3_0_2_4] , groups_width=1_1_2 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[2_3_2, 6_9_6, 1_3_9_2, 3_7_1_2] , groups_width=2_3_2 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 1_2, 1] , hidden_sizes=[3_2_8, 9_8_4, 1_9_6_8, 4_9_2_0] , groups_width=3_2_8 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[5_2_8, 1_0_5_6, 2_9_0_4, 7_3_9_2] , groups_width=2_6_4 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 1_6, 1] , hidden_sizes=[6_4_0, 1_6_9_6, 2_5_4_4, 5_0_8_8] , groups_width=6_4_0 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 1_7, 1] , hidden_sizes=[2_0_2_0, 4_0_4_0, 1_1_1_1_0, 2_8_2_8_0] , groups_width=1_0_1_0 ),
}
lowerCAmelCase_ :Any = NameToOurModelFuncMap()
lowerCAmelCase_ :List[Any] = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(lowercase__ : str , lowercase__ : Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
lowerCAmelCase_ :Optional[Any] = torch.hub.load_state_dict_from_url(lowercase__ , model_dir=str(lowercase__ ) , map_location="""cpu""" )
lowerCAmelCase_ :str = model_func()
        # the trunk weights are loaded into the model; the head (if any) is returned separately so it can be copied manually later
lowerCAmelCase_ :Any = files["""classy_state_dict"""]["""base_model"""]["""model"""]
lowerCAmelCase_ :int = model_state_dict["""trunk"""]
model.load_state_dict(lowercase__ )
return model.eval(), model_state_dict["heads"]
# pretrained
lowerCAmelCase_ :List[str] = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase_ :List[Any] = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase_ :Union[str, Any] = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowerCAmelCase_ :str = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
lowerCAmelCase_ :List[Any] = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase_ :Union[str, Any] = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
lowerCAmelCase_ :List[Any] = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
lowerCAmelCase_ :int = partial(
lowercase__ , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=2_7 , group_width=1_0_1_0 , w_0=1_7_4_4 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase__ , lowercase__ , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
lowercase__ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase__ , lowercase__ , lowercase__ , )
return config, expected_shape
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
            ' currently: regnetx-*, regnety-*. If `None`, all of them will be converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
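            # keep only the logits of the last position and apply temperature
            # scaling (a non-positive temperature falls back to 1.0)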
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
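                # top-k over the flattened (num_beams x vocab) grid: the integer
                # division below recovers the source beam index and the modulo
                # recovers the token id within the vocabulary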
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 1 | 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : str ) -> bool:
'''simple docstring'''
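    # these prefixes correspond to the major issuers: 34/37 (American Express),
    # 35 (JCB), 4 (Visa), 5 (Mastercard) and 6 (Discover)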
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def _snake_case ( lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = credit_card_number
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :str = len(lowercase__ ) - 2
for i in range(lowercase__ , -1 , -2 ):
# double the value of every second digit
lowerCAmelCase_ :int = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
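        # for a doubled digit d in 10..18 the digit sum equals (d % 10) + 1,
        # which is what the two lines below compute without string conversion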
if digit > 9:
digit %= 1_0
digit += 1
lowerCAmelCase_ :str = cc_number[:i] + str(lowercase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowercase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
def _snake_case ( lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :int = f"""{credit_card_number} is an invalid credit card number because"""
if not credit_card_number.isdigit():
print(f"""{error_message} it has nonnumerical characters.""" )
return False
if not 1_3 <= len(lowercase__ ) <= 1_6:
print(f"""{error_message} of its length.""" )
return False
if not validate_initial_digits(lowercase__ ):
print(f"""{error_message} of its first two digits.""" )
return False
if not luhn_validation(lowercase__ ):
print(f"""{error_message} it fails the Luhn check.""" )
return False
print(f"""{credit_card_number} is a valid credit card number.""" )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
| 1 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( lowercase__ : str = "isbn/0140328726" ) -> dict:
'''simple docstring'''
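    # e.g. the default "isbn/0140328726" results in a GET request to
    # https://openlibrary.org/isbn/0140328726.json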
lowerCAmelCase_ :Tuple = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowerCAmelCase_ :Optional[int] = f"""{olid} is not a valid Open Library olid"""
raise ValueError(lowercase__ )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def _snake_case ( lowercase__ : dict ) -> dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowerCAmelCase_ :Any = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCAmelCase_ :int = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowerCAmelCase_ :Union[str, Any] = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase_ :int = """, """.join(lowercase__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
__UpperCAmelCase = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
__UpperCAmelCase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('\n'.join(F"""{key}: {value}""" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : str , lowercase__ : bool = False ) -> str:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase_ :Union[str, Any] = f"""Expected string as input, found {type(lowercase__ )}"""
raise ValueError(lowercase__ )
if not isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase_ :List[str] = f"""Expected boolean as use_pascal parameter, found {type(lowercase__ )}"""
raise ValueError(lowercase__ )
lowerCAmelCase_ :List[str] = input_str.split("""_""" )
lowerCAmelCase_ :str = 0 if use_pascal else 1
lowerCAmelCase_ :Optional[Any] = words[start_index:]
lowerCAmelCase_ :List[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
lowerCAmelCase_ :Dict = """""" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Tuple = AutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" , return_dict=__A ).to(__A )
lowerCAmelCase_ :Optional[int] = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowerCAmelCase_ :List[Any] = tokenizer("""Hello there""" , return_tensors="""pt""" ).input_ids
lowerCAmelCase_ :Dict = tokenizer("""Hi I am""" , return_tensors="""pt""" ).input_ids
lowerCAmelCase_ :Union[str, Any] = model(input_ids.to(__A ) , labels=labels.to(__A ) ).loss
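        # `loss` is the mean negative log-likelihood per token, so multiplying by
        # the label sequence length recovers the summed log-likelihood score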
lowerCAmelCase_ :Tuple = -(labels.shape[-1] * loss.item())
lowerCAmelCase_ :int = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 1 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
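    # after the loop, modulus_power == alphabet_size ** (p_len - 1) % modulus,
    # i.e. the weight of the leading character that the rolling hash drops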
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
        # Update the rolling hash (see https://en.wikipedia.org/wiki/Rolling_hash)
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
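    # Test 1)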
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 1 | 1 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = 'PoolFormerConfig'
# Base docstring
__UpperCAmelCase = 'sail/poolformer_s12'
__UpperCAmelCase = [1, 5_12, 7, 7]
# Image classification docstring
__UpperCAmelCase = 'sail/poolformer_s12'
__UpperCAmelCase = 'tabby, tabby cat'
__UpperCAmelCase = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : float = 0.0 , lowercase__ : bool = False ) -> Any:
'''simple docstring'''
if drop_prob == 0.0 or not training:
return input
lowerCAmelCase_ :str = 1 - drop_prob
lowerCAmelCase_ :List[str] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
lowerCAmelCase_ :Any = keep_prob + torch.rand(lowercase__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
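    # dividing by keep_prob rescales the surviving paths so that the expected
    # value of the output matches the input (same trick as inverted dropout)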
lowerCAmelCase_ :Union[str, Any] = input.div(lowercase__ ) * random_tensor
return output
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A = None ) -> None:
super().__init__()
lowerCAmelCase_ :int = drop_prob
def __lowerCAmelCase ( self , __A ) -> torch.Tensor:
return drop_path(__A , self.drop_prob , self.training )
def __lowerCAmelCase ( self ) -> str:
return "p={}".format(self.drop_prob )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A , __A , __A=None ) -> Dict:
super().__init__()
lowerCAmelCase_ :List[str] = patch_size if isinstance(__A , collections.abc.Iterable ) else (patch_size, patch_size)
lowerCAmelCase_ :Any = stride if isinstance(__A , collections.abc.Iterable ) else (stride, stride)
lowerCAmelCase_ :Optional[int] = padding if isinstance(__A , collections.abc.Iterable ) else (padding, padding)
lowerCAmelCase_ :Dict = nn.Convad(__A , __A , kernel_size=__A , stride=__A , padding=__A )
lowerCAmelCase_ :Any = norm_layer(__A ) if norm_layer else nn.Identity()
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :Any = self.projection(__A )
lowerCAmelCase_ :Union[str, Any] = self.norm(__A )
return embeddings
class _SCREAMING_SNAKE_CASE ( nn.GroupNorm ):
def __init__( self , __A , **__A ) -> List[str]:
super().__init__(1 , __A , **__A )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Any:
super().__init__()
lowerCAmelCase_ :Union[str, Any] = nn.AvgPoolad(__A , stride=1 , padding=pool_size // 2 , count_include_pad=__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
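        # PoolFormer's token mixer: average pooling already contains the input
        # token, so it is subtracted here and re-added by the residual connection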
return self.pool(__A ) - hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ :str = nn.Convad(__A , __A , 1 )
lowerCAmelCase_ :List[str] = nn.Convad(__A , __A , 1 )
lowerCAmelCase_ :Tuple = PoolFormerDropPath(__A )
if isinstance(config.hidden_act , __A ):
lowerCAmelCase_ :Optional[int] = ACTaFN[config.hidden_act]
else:
lowerCAmelCase_ :Tuple = config.hidden_act
def __lowerCAmelCase ( self , __A ) -> List[str]:
lowerCAmelCase_ :Tuple = self.conva(__A )
lowerCAmelCase_ :Union[str, Any] = self.act_fn(__A )
lowerCAmelCase_ :Union[str, Any] = self.drop(__A )
lowerCAmelCase_ :Dict = self.conva(__A )
lowerCAmelCase_ :Any = self.drop(__A )
return hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A , __A , __A ) -> Any:
super().__init__()
lowerCAmelCase_ :Union[str, Any] = PoolFormerPooling(__A )
lowerCAmelCase_ :List[Any] = PoolFormerOutput(__A , __A , __A , __A )
lowerCAmelCase_ :Union[str, Any] = PoolFormerGroupNorm(__A )
lowerCAmelCase_ :Optional[int] = PoolFormerGroupNorm(__A )
        # Stochastic depth (DropPath), a regularizer that is useful for training deep networks
lowerCAmelCase_ :Union[str, Any] = PoolFormerDropPath(__A ) if drop_path > 0.0 else nn.Identity()
lowerCAmelCase_ :Tuple = config.use_layer_scale
if config.use_layer_scale:
lowerCAmelCase_ :str = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
lowerCAmelCase_ :List[Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__A) ) , requires_grad=__A )
def __lowerCAmelCase ( self , __A ) -> List[str]:
if self.use_layer_scale:
lowerCAmelCase_ :Tuple = self.pooling(self.before_norm(__A ) )
lowerCAmelCase_ :Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowerCAmelCase_ :List[str] = hidden_states + self.drop_path(__A )
lowerCAmelCase_ :Any = ()
lowerCAmelCase_ :Optional[Any] = self.output(self.after_norm(__A ) )
lowerCAmelCase_ :int = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowerCAmelCase_ :Tuple = hidden_states + self.drop_path(__A )
lowerCAmelCase_ :Tuple = (output,) + outputs
return outputs
else:
lowerCAmelCase_ :Any = self.drop_path(self.pooling(self.before_norm(__A ) ) )
# First residual connection
lowerCAmelCase_ :Any = pooling_output + hidden_states
lowerCAmelCase_ :Optional[int] = ()
# Second residual connection inside the PoolFormerOutput block
lowerCAmelCase_ :Optional[int] = self.drop_path(self.output(self.after_norm(__A ) ) )
lowerCAmelCase_ :Optional[int] = hidden_states + layer_output
lowerCAmelCase_ :List[Any] = (output,) + outputs
return outputs
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Tuple:
super().__init__()
lowerCAmelCase_ :Optional[Any] = config
# stochastic depth decay rule
lowerCAmelCase_ :Any = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowerCAmelCase_ :int = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowerCAmelCase_ :List[Any] = nn.ModuleList(__A )
# Transformer blocks
lowerCAmelCase_ :Union[str, Any] = []
lowerCAmelCase_ :int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowerCAmelCase_ :Dict = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__A , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__A ) )
lowerCAmelCase_ :Tuple = nn.ModuleList(__A )
def __lowerCAmelCase ( self , __A , __A=False , __A=True ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = () if output_hidden_states else None
lowerCAmelCase_ :Dict = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowerCAmelCase_ , lowerCAmelCase_ :int = layers
# Get patch embeddings from hidden_states
lowerCAmelCase_ :List[str] = embedding_layer(__A )
# Send the embeddings through the blocks
for _, blk in enumerate(__A ):
lowerCAmelCase_ :int = blk(__A )
lowerCAmelCase_ :int = layer_outputs[0]
if output_hidden_states:
lowerCAmelCase_ :Dict = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__A , hidden_states=__A )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = PoolFormerConfig
UpperCAmelCase_ :Dict = "poolformer"
UpperCAmelCase_ :Union[str, Any] = "pixel_values"
UpperCAmelCase_ :Tuple = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
if isinstance(__A , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__A , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def __lowerCAmelCase ( self , __A , __A=False ) -> Tuple:
if isinstance(__A , __A ):
lowerCAmelCase_ :List[Any] = value
__UpperCAmelCase = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__UpperCAmelCase = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , A__ , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[Any]:
super().__init__(__A )
lowerCAmelCase_ :Dict = config
lowerCAmelCase_ :str = PoolFormerEncoder(__A )
# Initialize weights and apply final processing
self.post_init()
def __lowerCAmelCase ( self ) -> Optional[int]:
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCAmelCase ( self , __A = None , __A = None , __A = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
lowerCAmelCase_ :List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ :Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
lowerCAmelCase_ :Optional[Any] = self.encoder(
__A , output_hidden_states=__A , return_dict=__A , )
lowerCAmelCase_ :Optional[Any] = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__A , hidden_states=encoder_outputs.hidden_states , )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> Optional[int]:
super().__init__()
lowerCAmelCase_ :str = nn.Linear(config.hidden_size , config.hidden_size )
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.dense(__A )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , A__ , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> int:
super().__init__(__A )
lowerCAmelCase_ :List[str] = config.num_labels
lowerCAmelCase_ :Tuple = PoolFormerModel(__A )
# Final norm
lowerCAmelCase_ :Optional[int] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowerCAmelCase_ :Optional[int] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCAmelCase ( self , __A = None , __A = None , __A = None , __A = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
lowerCAmelCase_ :Any = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ :List[str] = self.poolformer(
__A , output_hidden_states=__A , return_dict=__A , )
lowerCAmelCase_ :Optional[int] = outputs[0]
lowerCAmelCase_ :Dict = self.classifier(self.norm(__A ).mean([-2, -1] ) )
lowerCAmelCase_ :Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ :str = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ :List[Any] = """single_label_classification"""
else:
lowerCAmelCase_ :Union[str, Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
lowerCAmelCase_ :Optional[Any] = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ :Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase_ :Optional[int] = loss_fct(__A , __A )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ :Union[str, Any] = CrossEntropyLoss()
lowerCAmelCase_ :Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ :Optional[Any] = BCEWithLogitsLoss()
lowerCAmelCase_ :Optional[Any] = loss_fct(__A , __A )
if not return_dict:
lowerCAmelCase_ :List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__A , logits=__A , hidden_states=outputs.hidden_states )
| 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
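        # padding to a multiple of 8 (fp16/bf16) or 16 (fp8) keeps the sequence
        # dimension aligned with sizes that map efficiently onto tensor cores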
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
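                # every `local_sgd_steps` optimizer steps this averages the model
                # parameters across all workers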
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use FlavaImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
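# A standalone sketch of the duplicate-trimming logic above: in distributed
# evaluation the sampler pads the dataset so it divides evenly across processes,
# so the last gathered batch can contain repeated samples. All numbers below are
# illustrative.
def _trim_demo():
    import torch

    dataset_len, samples_seen = 10, 8        # 8 genuine samples counted before the last batch
    gathered = torch.arange(4)               # last gathered batch: 4 items, 2 of them padding duplicates
    trimmed = gathered[: dataset_len - samples_seen]
    assert trimmed.shape[0] == 2             # only the 2 genuine samples are kept
    return trimmed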
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of a training script that saves checkpoints and resumes from them.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[int] = ["pixel_values"]
def __init__( self , __A = True , __A = None , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , **__A , ) -> None:
super().__init__(**__A )
lowerCAmelCase_ :List[Any] = size if size is not None else {"""shortest_edge""": 384}
lowerCAmelCase_ :Optional[Any] = get_size_dict(__A , default_to_square=__A )
lowerCAmelCase_ :str = do_resize
lowerCAmelCase_ :Union[str, Any] = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase_ :int = crop_pct if crop_pct is not None else 224 / 256
lowerCAmelCase_ :Optional[int] = resample
lowerCAmelCase_ :str = do_rescale
lowerCAmelCase_ :Dict = rescale_factor
lowerCAmelCase_ :int = do_normalize
lowerCAmelCase_ :Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ :Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , __A , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
lowerCAmelCase_ :int = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase_ :str = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase_ :Dict = int(shortest_edge / crop_pct )
lowerCAmelCase_ :List[Any] = get_resize_output_image_size(__A , size=__A , default_to_square=__A )
lowerCAmelCase_ :Optional[Any] = resize(image=__A , size=__A , resample=__A , data_format=__A , **__A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__A , size=(shortest_edge, shortest_edge) , data_format=__A , **__A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__A , size=(shortest_edge, shortest_edge) , resample=__A , data_format=__A , **__A )
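# Worked example of the crop_pct branch above (illustrative numbers): with
# shortest_edge=224 and the default crop_pct=224/256, the image is first resized
# so its short side is int(224 / (224 / 256)) = 256, then center-cropped to
# 224x224, the familiar "resize to 256, crop to 224" evaluation recipe.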
def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> Tuple:
return rescale(__A , scale=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
lowerCAmelCase_ :Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ :List[str] = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ :str = resample if resample is not None else self.resample
lowerCAmelCase_ :str = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ :int = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ :Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ :Any = image_std if image_std is not None else self.image_std
lowerCAmelCase_ :Dict = size if size is not None else self.size
lowerCAmelCase_ :Tuple = get_size_dict(__A , default_to_square=__A )
lowerCAmelCase_ :str = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase_ :List[str] = [to_numpy_array(__A ) for image in images]
if do_resize:
lowerCAmelCase_ :Union[str, Any] = [self.resize(image=__A , size=__A , crop_pct=__A , resample=__A ) for image in images]
if do_rescale:
lowerCAmelCase_ :Any = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
lowerCAmelCase_ :Union[str, Any] = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
lowerCAmelCase_ :str = [to_channel_dimension_format(__A , __A ) for image in images]
lowerCAmelCase_ :int = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
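# A self-contained numpy sketch of the rescale + normalize math applied in
# `preprocess` above, using the [0.5, 0.5, 0.5] "standard" ImageNet mean/std that
# serve as the defaults here. Values are illustrative.
def _normalize_demo():
    import numpy as np

    image = np.full((3, 4, 4), 128, dtype=np.float32)   # fake CHW image
    rescaled = image * (1 / 255)                        # do_rescale step
    mean = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
    std = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)
    normalized = (rescaled - mean) / std                # do_normalize step
    return normalized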
| 1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
# The ZeRO stage is resolved as early as possible, before the model is created, so that
# ``is_deepspeed_zero3_enabled`` queries work and the early deepspeed config object is
# available during ``zero.Init()``, which needs to know the dtype and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
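# A standalone sketch of the dotted-key lookup that `find_config_node` and
# `get_value` implement above: walk a nested dict one path segment at a time.
def _get_value_demo():
    config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}

    def get_value(cfg, ds_key_long, default=None):
        *nodes, key = ds_key_long.split(".")
        for node in nodes:
            cfg = cfg.get(node)
            if cfg is None:
                return default
        return cfg.get(key, default)

    assert get_value(config, "zero_optimization.stage") == 3
    assert get_value(config, "zero_optimization.offload_param.device") == "cpu"
    assert get_value(config, "missing.key", default=-1) == -1
    return config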
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# This wrapper turns the above calls into no-ops when Accelerate runs under
# Deepspeed, while keeping normal behavior in non-Deepspeed cases, enabling a single
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
| 1 | 1 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[str] = logging.get_logger()
# the current default level is logging.WARNING
lowerCAmelCase_ :Optional[Any] = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[Any] = logging.get_verbosity()
lowerCAmelCase_ :List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
lowerCAmelCase_ :Optional[int] = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__A ) as cl:
logger.warning(__A )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__A ) as cl:
logger.warning(__A )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__A ) as cl:
logger.warning(__A )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(__A )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowerCAmelCase_ :Tuple = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
lowerCAmelCase_ :str = os.getenv("""TRANSFORMERS_VERBOSITY""" , __A )
lowerCAmelCase_ :Optional[Any] = logging.log_levels[env_level_str]
lowerCAmelCase_ :Union[str, Any] = logging.get_verbosity()
self.assertEqual(
__A , __A , f"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
# restore to the original level
lowerCAmelCase_ :Any = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def __lowerCAmelCase ( self ) -> Any:
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase_ :Optional[int] = logging.logging.getLogger()
with CaptureLogger(__A ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def __lowerCAmelCase ( self ) -> Any:
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
lowerCAmelCase_ :List[str] = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
lowerCAmelCase_ :List[str] = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(__A ) as cl:
logger.warning_advice(__A )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__A ) as cl:
logger.warning_advice(__A )
self.assertEqual(cl.out , msg + """\n""" )
def _snake_case ( ) -> List[str]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
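# A minimal usage sketch of the verbosity API exercised by the tests above.
def _verbosity_demo():
    from transformers.utils import logging

    logging.set_verbosity_info()
    logger = logging.get_logger("transformers")
    logger.info("visible at INFO level")
    logging.set_verbosity_error()   # info and warning messages are now suppressed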
| 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :str = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
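# A minimal usage sketch for the segmentation tool above. Instantiating it
# downloads the CLIPSeg checkpoint, and the image path is hypothetical, so this
# is illustrative rather than a test.
def _segmenter_demo():
    from PIL import Image

    tool = _SCREAMING_SNAKE_CASE()           # the tool class defined above
    image = Image.open("cat_photo.png")      # hypothetical local image
    mask = tool(image, "a cat")              # returns a PIL mask, white where the label matches
    return mask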
| 1 | 1 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=3 , __A=7 , __A=True , __A=True , __A=False , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> Optional[int]:
lowerCAmelCase_ :Tuple = parent
lowerCAmelCase_ :int = batch_size
lowerCAmelCase_ :List[str] = seq_length
lowerCAmelCase_ :int = is_training
lowerCAmelCase_ :Optional[int] = use_input_mask
lowerCAmelCase_ :Tuple = use_token_type_ids
lowerCAmelCase_ :Optional[Any] = use_labels
lowerCAmelCase_ :List[str] = vocab_size
lowerCAmelCase_ :Optional[Any] = hidden_size
lowerCAmelCase_ :Any = num_hidden_layers
lowerCAmelCase_ :List[str] = num_attention_heads
lowerCAmelCase_ :int = intermediate_size
lowerCAmelCase_ :Any = hidden_act
lowerCAmelCase_ :List[Any] = hidden_dropout_prob
lowerCAmelCase_ :Dict = attention_probs_dropout_prob
lowerCAmelCase_ :List[str] = max_position_embeddings
lowerCAmelCase_ :Union[str, Any] = type_vocab_size
lowerCAmelCase_ :Optional[Any] = type_sequence_label_size
lowerCAmelCase_ :Tuple = initializer_range
lowerCAmelCase_ :Optional[int] = num_labels
lowerCAmelCase_ :Tuple = num_choices
lowerCAmelCase_ :Optional[Any] = scope
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Optional[Any] = None
if self.use_input_mask:
lowerCAmelCase_ :List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Union[str, Any] = None
lowerCAmelCase_ :int = None
lowerCAmelCase_ :List[str] = None
lowerCAmelCase_ :Union[str, Any] = None
if self.use_labels:
lowerCAmelCase_ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :str = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Optional[Any]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__A , )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]:
lowerCAmelCase_ :List[str] = FalconModel(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :List[Any] = model(__A , attention_mask=__A )
lowerCAmelCase_ :str = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> int:
lowerCAmelCase_ :List[str] = True
lowerCAmelCase_ :List[str] = FalconModel(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :int = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )
lowerCAmelCase_ :Optional[Any] = model(
__A , attention_mask=__A , encoder_hidden_states=__A , )
lowerCAmelCase_ :List[str] = model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Dict:
lowerCAmelCase_ :Optional[int] = FalconForCausalLM(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Optional[Any] = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[Any] = True
lowerCAmelCase_ :str = True
lowerCAmelCase_ :List[str] = FalconForCausalLM(config=__A )
model.to(__A )
model.eval()
# first forward pass
lowerCAmelCase_ :int = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , use_cache=__A , )
lowerCAmelCase_ :Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase_ :Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ :List[str] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase_ :Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase_ :Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase_ :int = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , output_hidden_states=__A , )["""hidden_states"""][0]
lowerCAmelCase_ :Dict = model(
__A , attention_mask=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , past_key_values=__A , output_hidden_states=__A , )["""hidden_states"""][0]
# select random slice
lowerCAmelCase_ :List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase_ :Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase_ :List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1E-3 ) )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) :Optional[Any] = config_and_inputs
lowerCAmelCase_ :str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[int] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ :str = (FalconForCausalLM,) if is_torch_available() else ()
UpperCAmelCase_ :Union[str, Any] = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :int = False
UpperCAmelCase_ :int = False
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :List[Any] = FalconModelTester(self )
lowerCAmelCase_ :str = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ , *lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCAmelCase_ :int = alibi
self.model_tester.create_and_check_model(__A , *__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Optional[Any] = 3
lowerCAmelCase_ :Any = input_dict["""input_ids"""]
lowerCAmelCase_ :int = input_ids.ne(1 ).to(__A )
lowerCAmelCase_ :List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ :Dict = FalconForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Any = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :List[str] = 3
lowerCAmelCase_ :Union[str, Any] = """single_label_classification"""
lowerCAmelCase_ :int = input_dict["""input_ids"""]
lowerCAmelCase_ :Tuple = input_ids.ne(1 ).to(__A )
lowerCAmelCase_ :int = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase_ :Any = FalconForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :int = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Optional[Any] = input_dict["""input_ids"""]
lowerCAmelCase_ :Any = FalconForCausalLM(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Optional[int] = model(__A , use_cache=__A )
lowerCAmelCase_ :Union[str, Any] = input_ids.shape[0]
lowerCAmelCase_ :Tuple = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase_ :Optional[int] = model._convert_cache_to_standard_format(__A , __A )
for layer in range(len(__A ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Optional[Any] = 3
lowerCAmelCase_ :Optional[int] = """multi_label_classification"""
lowerCAmelCase_ :List[str] = input_dict["""input_ids"""]
lowerCAmelCase_ :Optional[int] = input_ids.ne(1 ).to(__A )
lowerCAmelCase_ :Optional[int] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase_ :Tuple = FalconForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Optional[Any] = model(__A , attention_mask=__A , labels=__A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __lowerCAmelCase ( self ) -> Tuple:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__A , """use_cache""" ):
return
lowerCAmelCase_ :Optional[Any] = model_class(__A ).to(__A )
if "use_cache" not in inputs:
lowerCAmelCase_ :Any = True
lowerCAmelCase_ :List[str] = model(**__A )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase_ :int = (
getattr(__A , """decoder_layers""" , __A )
or getattr(__A , """num_decoder_layers""" , __A )
or config.num_hidden_layers
)
lowerCAmelCase_ :Optional[int] = getattr(__A , """num_kv_heads""" , config.num_attention_heads )
lowerCAmelCase_ :int = getattr(__A , """d_model""" , config.hidden_size )
lowerCAmelCase_ :Tuple = embed_dim // num_attention_heads
lowerCAmelCase_ :List[str] = outputs["""past_key_values"""]
self.assertEqual(len(__A ) , __A )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = inputs["""input_ids"""].shape
for i in range(__A ):
if config.new_decoder_architecture:
lowerCAmelCase_ :Tuple = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase_ :List[Any] = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Tuple = AutoTokenizer.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
lowerCAmelCase_ :int = FalconForCausalLM.from_pretrained("""Rocketknight1/falcon-rw-1b""" )
model.eval()
model.to(__A )
lowerCAmelCase_ :List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__A )
lowerCAmelCase_ :List[str] = (
"""My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."""
)
lowerCAmelCase_ :int = model.generate(**__A , do_sample=__A , max_new_tokens=19 )
lowerCAmelCase_ :List[str] = tokenizer.batch_decode(__A )[0]
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but are much smaller and have fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase_ :Union[str, Any] = AutoTokenizer.from_pretrained(__A )
lowerCAmelCase_ :str = FalconForCausalLM.from_pretrained(__A )
model.eval()
model.to(__A )
lowerCAmelCase_ :List[str] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__A )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__A , do_sample=__A , max_new_tokens=4 )
model.generate(**__A , do_sample=__A , max_new_tokens=4 )
model.generate(**__A , num_beams=2 , max_new_tokens=4 )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but are much smaller and have fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase_ :Optional[int] = AutoTokenizer.from_pretrained(__A )
lowerCAmelCase_ :Dict = FalconForCausalLM.from_pretrained(__A )
model.eval()
model.to(device=__A )
lowerCAmelCase_ :Tuple = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__A )
# Test results are the same with and without cache
lowerCAmelCase_ :List[Any] = model.generate(**__A , do_sample=__A , max_new_tokens=20 , use_cache=__A )
lowerCAmelCase_ :Dict = model.generate(**__A , do_sample=__A , max_new_tokens=20 , use_cache=__A )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
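# Worked example for the 0/1 knapsack recursion above, called under the name
# `knapsack` that its own recursive calls use. With capacity 8 the best pick is
# the items of weight 3 and 5, for a total value of 13.
def _knapsack_demo():
    weights, values = [3, 4, 5], [4, 5, 9]
    best = knapsack(weights, values, len(weights), 8, 0)
    assert best == 13
    return best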
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(
A__ , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class _SCREAMING_SNAKE_CASE ( A__ ):
def __lowerCAmelCase ( self , __A ) -> np.ndarray:
if self.framework == "tf":
lowerCAmelCase_ :Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase_ :List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__A )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def __lowerCAmelCase ( self , __A ) -> np.ndarray:
lowerCAmelCase_ :List[Any] = self.get_masked_index(__A )
lowerCAmelCase_ :List[str] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def __lowerCAmelCase ( self , __A ) -> str:
if isinstance(__A , __A ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__A )
def __lowerCAmelCase ( self , __A , __A=None , **__A ) -> Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase_ :Dict = self.framework
lowerCAmelCase_ :Optional[Any] = self.tokenizer(__A , return_tensors=__A )
self.ensure_exactly_one_mask_token(__A )
return model_inputs
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :str = self.model(**__A )
lowerCAmelCase_ :Dict = model_inputs["""input_ids"""]
return model_outputs
def __lowerCAmelCase ( self , __A , __A=5 , __A=None ) -> Optional[int]:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase_ :Union[str, Any] = target_ids.shape[0]
lowerCAmelCase_ :Any = model_outputs["""input_ids"""][0]
lowerCAmelCase_ :Any = model_outputs["""logits"""]
if self.framework == "tf":
lowerCAmelCase_ :Any = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase_ :Dict = outputs.numpy()
lowerCAmelCase_ :Tuple = outputs[0, masked_index, :]
lowerCAmelCase_ :Tuple = stable_softmax(__A , axis=-1 )
if target_ids is not None:
lowerCAmelCase_ :Any = tf.gather_nd(tf.squeeze(__A , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase_ :str = tf.expand_dims(__A , 0 )
lowerCAmelCase_ :Tuple = tf.math.top_k(__A , k=__A )
lowerCAmelCase_ , lowerCAmelCase_ :str = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase_ :Optional[int] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__A ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase_ :List[Any] = outputs[0, masked_index, :]
lowerCAmelCase_ :Tuple = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase_ :List[str] = probs[..., target_ids]
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = probs.topk(__A )
lowerCAmelCase_ :int = []
lowerCAmelCase_ :Any = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase_ :Any = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase_ :int = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase_ :str = target_ids[p].tolist()
lowerCAmelCase_ :Union[str, Any] = p
# Filter padding out:
lowerCAmelCase_ :Optional[int] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# We normally skip special tokens to give readable output, but for multi-mask
# inputs the other [MASK] tokens would be stripped too, making the output look
# odd, so we add them back
lowerCAmelCase_ :Dict = self.tokenizer.decode(__A , skip_special_tokens=__A )
lowerCAmelCase_ :Dict = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(__A )
result.append(__A )
if single_mask:
return result[0]
return result
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
if isinstance(__A , __A ):
lowerCAmelCase_ :Dict = [targets]
try:
lowerCAmelCase_ :Optional[Any] = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase_ :int = {}
lowerCAmelCase_ :str = []
for target in targets:
lowerCAmelCase_ :List[Any] = vocab.get(__A , __A )
if id_ is None:
lowerCAmelCase_ :Optional[int] = self.tokenizer(
__A , add_special_tokens=__A , return_attention_mask=__A , return_token_type_ids=__A , max_length=1 , truncation=__A , )["""input_ids"""]
if len(__A ) == 0:
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
lowerCAmelCase_ :Any = input_ids[0]
# XXX: if users hit this path it becomes pretty slow, so make sure the warning
# below lets them fix their input and get faster performance.
logger.warning(
f"""The specified target token `{target}` does not exist in the model vocabulary. """
f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
lowerCAmelCase_ :Optional[int] = list(set(__A ) )
if len(__A ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
lowerCAmelCase_ :Union[str, Any] = np.array(__A )
return target_ids
def __lowerCAmelCase ( self , __A=None , __A=None ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = {}
if targets is not None:
lowerCAmelCase_ :List[Any] = self.get_target_ids(__A , __A )
lowerCAmelCase_ :Optional[Any] = target_ids
if top_k is not None:
lowerCAmelCase_ :List[str] = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self , __A , *__A , **__A ) -> Optional[int]:
lowerCAmelCase_ :int = super().__call__(__A , **__A )
if isinstance(__A , __A ) and len(__A ) == 1:
return outputs[0]
return outputs
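# A minimal usage sketch for the pipeline above (model download required; the
# scores in the returned dicts are model-dependent, not exact).
def _fill_mask_demo():
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
    preds = unmasker("The capital of France is [MASK].", top_k=3)
    # each entry: {"score": ..., "token": ..., "token_str": ..., "sequence": ...}
    return preds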
| 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index != 0  # disable the bar on non-main processes
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
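# Usage sketch, assuming the wrapper above is exported under its usual name
# `tqdm`: `main_process_only` is the first positional argument, so the iterable
# comes second, and non-main processes get a disabled bar.
def _tqdm_demo():
    total = 0
    for i in tqdm(True, range(10)):
        total += i
    return total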
| 1 | 1 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def _snake_case ( lowercase__ : int ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = image.size
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
lowerCAmelCase_ :Union[str, Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
lowerCAmelCase_ :Optional[Any] = np.array(lowercase__ ).astype(np.floataa ) / 255.0
lowerCAmelCase_ :Optional[int] = image[None].transpose(0 , 3 , 1 , 2 )
lowerCAmelCase_ :List[Any] = torch.from_numpy(lowercase__ )
return 2.0 * image - 1.0
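# Worked example of the preprocessing above (illustrative sizes): a 500x333 image
# is snapped down to 480x320 via (w - w % 32, h - h % 32), Lanczos-resized,
# scaled to [0, 1] as float32, laid out NCHW, and finally mapped to [-1, 1]
# through 2.0 * image - 1.0.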
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A , __A , ) -> str:
super().__init__()
self.register_modules(vqvae=__A , unet=__A , scheduler=__A )
@torch.no_grad()
def __call__( self , __A = None , __A = 1 , __A = 100 , __A = 0.0 , __A = None , __A = "pil" , __A = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(__A , PIL.Image.Image ):
lowerCAmelCase_ :Optional[Any] = 1
elif isinstance(__A , torch.Tensor ):
lowerCAmelCase_ :Optional[int] = image.shape[0]
else:
raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__A )}""" )
if isinstance(__A , PIL.Image.Image ):
lowerCAmelCase_ :Any = preprocess(__A )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCAmelCase_ :str = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCAmelCase_ :str = next(self.unet.parameters() ).dtype
lowerCAmelCase_ :Union[str, Any] = randn_tensor(__A , generator=__A , device=self.device , dtype=__A )
lowerCAmelCase_ :Optional[int] = image.to(device=self.device , dtype=__A )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__A , device=self.device )
lowerCAmelCase_ :List[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase_ :Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase_ :Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase_ :Dict = {}
if accepts_eta:
lowerCAmelCase_ :Tuple = eta
for t in self.progress_bar(__A ):
# concat latents and low resolution image in the channel dimension.
lowerCAmelCase_ :List[Any] = torch.cat([latents, image] , dim=1 )
lowerCAmelCase_ :Optional[int] = self.scheduler.scale_model_input(__A , __A )
# predict the noise residual
lowerCAmelCase_ :Union[str, Any] = self.unet(__A , __A ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ :Any = self.scheduler.step(__A , __A , __A , **__A ).prev_sample
# decode the image latents with the VQVAE
lowerCAmelCase_ :Tuple = self.vqvae.decode(__A ).sample
lowerCAmelCase_ :Optional[int] = torch.clamp(__A , -1.0 , 1.0 )
lowerCAmelCase_ :Tuple = image / 2 + 0.5
lowerCAmelCase_ :Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ :str = self.numpy_to_pil(__A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__A )
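# A minimal usage sketch for the pipeline above, assuming the public LDM
# super-resolution checkpoint and a hypothetical local input image.
def _superres_demo():
    from PIL import Image
    from diffusers import LDMSuperResolutionPipeline

    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = Image.open("low_res.png").convert("RGB")   # hypothetical input
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")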
| 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A=0.0 , __A = None , __A = "geglu" , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = "layer_norm" , __A = False , ) -> List[str]:
super().__init__()
lowerCAmelCase_ :Union[str, Any] = only_cross_attention
lowerCAmelCase_ :List[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
lowerCAmelCase_ :List[Any] = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
lowerCAmelCase_ :List[Any] = AdaLayerNorm(__A , __A )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase_ :List[str] = AdaLayerNormZero(__A , __A )
else:
lowerCAmelCase_ :Dict = nn.LayerNorm(__A , elementwise_affine=__A )
lowerCAmelCase_ :Optional[Any] = Attention(
query_dim=__A , heads=__A , dim_head=__A , dropout=__A , bias=__A , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__A , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
# the second cross attention block.
lowerCAmelCase_ :int = (
AdaLayerNorm(__A , __A )
if self.use_ada_layer_norm
else nn.LayerNorm(__A , elementwise_affine=__A )
)
lowerCAmelCase_ :List[str] = Attention(
query_dim=__A , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__A , dim_head=__A , dropout=__A , bias=__A , upcast_attention=__A , ) # is self-attn if encoder_hidden_states is none
else:
lowerCAmelCase_ :str = None
lowerCAmelCase_ :List[str] = None
# 3. Feed-forward
lowerCAmelCase_ :Dict = nn.LayerNorm(__A , elementwise_affine=__A )
lowerCAmelCase_ :Tuple = FeedForward(__A , dropout=__A , activation_fn=__A , final_dropout=__A )
# let chunk size default to None
lowerCAmelCase_ :Tuple = None
lowerCAmelCase_ :List[str] = 0
def __lowerCAmelCase ( self , __A , __A ) -> str:
# Sets chunk feed-forward
lowerCAmelCase_ :Any = chunk_size
lowerCAmelCase_ :List[Any] = dim
def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , ) -> Any:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
lowerCAmelCase_ :Optional[Any] = self.norma(__A , __A )
elif self.use_ada_layer_norm_zero:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.norma(
__A , __A , __A , hidden_dtype=hidden_states.dtype )
else:
lowerCAmelCase_ :str = self.norma(__A )
lowerCAmelCase_ :Union[str, Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
lowerCAmelCase_ :List[Any] = self.attna(
__A , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__A , **__A , )
if self.use_ada_layer_norm_zero:
lowerCAmelCase_ :Optional[Any] = gate_msa.unsqueeze(1 ) * attn_output
lowerCAmelCase_ :Dict = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
lowerCAmelCase_ :Dict = (
self.norma(__A , __A ) if self.use_ada_layer_norm else self.norma(__A )
)
lowerCAmelCase_ :Union[str, Any] = self.attna(
__A , encoder_hidden_states=__A , attention_mask=__A , **__A , )
lowerCAmelCase_ :Dict = attn_output + hidden_states
# 3. Feed-forward
lowerCAmelCase_ :Dict = self.norma(__A )
if self.use_ada_layer_norm_zero:
lowerCAmelCase_ :Union[str, Any] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
lowerCAmelCase_ :Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
lowerCAmelCase_ :Tuple = torch.cat(
[self.ff(__A ) for hid_slice in norm_hidden_states.chunk(__A , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
lowerCAmelCase_ :List[str] = self.ff(__A )
if self.use_ada_layer_norm_zero:
lowerCAmelCase_ :str = gate_mlp.unsqueeze(1 ) * ff_output
lowerCAmelCase_ :List[Any] = ff_output + hidden_states
return hidden_states
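# A minimal sketch (names and sizes hypothetical) of why the chunked
# feed-forward path above is safe: a position-wise MLP treats every
# token independently, so splitting the sequence dimension into chunks
# and concatenating the results changes peak memory, not the output.
_ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))
_hidden = torch.randn(2, 6, 8)  # (batch, seq_len, dim)
_chunked = torch.cat([_ff(c) for c in _hidden.chunk(3, dim=1)], dim=1)
assert torch.allclose(_ff(_hidden), _chunked, atol=1e-6)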
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A = None , __A = 4 , __A = 0.0 , __A = "geglu" , __A = False , ) -> Dict:
super().__init__()
lowerCAmelCase_ :Optional[int] = int(dim * mult )
lowerCAmelCase_ :List[str] = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
lowerCAmelCase_ :str = GELU(__A , __A )
if activation_fn == "gelu-approximate":
lowerCAmelCase_ :List[str] = GELU(__A , __A , approximate="""tanh""" )
elif activation_fn == "geglu":
lowerCAmelCase_ :Tuple = GEGLU(__A , __A )
elif activation_fn == "geglu-approximate":
lowerCAmelCase_ :List[Any] = ApproximateGELU(__A , __A )
lowerCAmelCase_ :Optional[Any] = nn.ModuleList([] )
# project in
self.net.append(__A )
# project dropout
self.net.append(nn.Dropout(__A ) )
# project out
self.net.append(nn.Linear(__A , __A ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__A ) )
def __lowerCAmelCase ( self , __A ) -> List[Any]:
for module in self.net:
lowerCAmelCase_ :str = module(__A )
return hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A = "none" ) -> Dict:
super().__init__()
lowerCAmelCase_ :int = nn.Linear(__A , __A )
lowerCAmelCase_ :Tuple = approximate
def __lowerCAmelCase ( self , __A ) -> str:
if gate.device.type != "mps":
return F.gelu(__A , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :List[Any] = self.proj(__A )
lowerCAmelCase_ :List[str] = self.gelu(__A )
return hidden_states
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A ) -> Union[str, Any]:
super().__init__()
lowerCAmelCase_ :Any = nn.Linear(__A , dim_out * 2 )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
if gate.device.type != "mps":
return F.gelu(__A )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.proj(__A ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__A )
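# Self-contained sketch of the GEGLU gating above (dimensions are
# hypothetical): one linear projection doubles the width, the result is
# split in half, and one half gates the other through GELU.
_proj = nn.Linear(16, 2 * 32)
_x = torch.randn(4, 16)
_hidden, _gate = _proj(_x).chunk(2, dim=-1)
assert (_hidden * F.gelu(_gate)).shape == (4, 32)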
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :Optional[int] = nn.Linear(__A , __A )
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :str = self.proj(__A )
return x * torch.sigmoid(1.7_0_2 * x )
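# The 1.702 factor above is the sigmoid approximation of GELU from
# Hendrycks & Gimpel: x * sigmoid(1.702 * x) ~ x * Phi(x). A quick
# check against the exact op (the approximation is good to ~2e-2):
_xs = torch.linspace(-3.0, 3.0, steps=13)
assert torch.allclose(_xs * torch.sigmoid(1.702 * _xs), F.gelu(_xs), atol=0.03)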
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A ) -> Tuple:
super().__init__()
lowerCAmelCase_ :int = nn.Embedding(__A , __A )
lowerCAmelCase_ :Optional[Any] = nn.SiLU()
lowerCAmelCase_ :Union[str, Any] = nn.Linear(__A , embedding_dim * 2 )
lowerCAmelCase_ :Dict = nn.LayerNorm(__A , elementwise_affine=__A )
def __lowerCAmelCase ( self , __A , __A ) -> str:
lowerCAmelCase_ :List[Any] = self.linear(self.silu(self.emb(__A ) ) )
lowerCAmelCase_ , lowerCAmelCase_ :str = torch.chunk(__A , 2 )
lowerCAmelCase_ :Optional[Any] = self.norm(__A ) * (1 + scale) + shift
return x
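# Shape sketch of the adaptive modulation above (illustrative sizes):
# an embedding-derived (scale, shift) pair modulates the normalized
# activations as norm(x) * (1 + scale) + shift, one pair per sample.
_norm = nn.LayerNorm(8, elementwise_affine=False)
_x = torch.randn(2, 5, 8)
_scale, _shift = torch.randn(2, 1, 8), torch.randn(2, 1, 8)
assert (_norm(_x) * (1 + _scale) + _shift).shape == _x.shape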
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A ) -> Any:
super().__init__()
lowerCAmelCase_ :Optional[Any] = CombinedTimestepLabelEmbeddings(__A , __A )
lowerCAmelCase_ :int = nn.SiLU()
lowerCAmelCase_ :str = nn.Linear(__A , 6 * embedding_dim , bias=__A )
lowerCAmelCase_ :List[Any] = nn.LayerNorm(__A , elementwise_affine=__A , eps=1E-6 )
def __lowerCAmelCase ( self , __A , __A , __A , __A=None ) -> Tuple:
lowerCAmelCase_ :List[Any] = self.linear(self.silu(self.emb(__A , __A , hidden_dtype=__A ) ) )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = emb.chunk(6 , dim=1 )
lowerCAmelCase_ :List[str] = self.norm(__A ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , __A = None , __A = 1E-5 ) -> int:
super().__init__()
lowerCAmelCase_ :Tuple = num_groups
lowerCAmelCase_ :Tuple = eps
if act_fn is None:
lowerCAmelCase_ :int = None
else:
lowerCAmelCase_ :List[Any] = get_activation(__A )
lowerCAmelCase_ :Dict = nn.Linear(__A , out_dim * 2 )
def __lowerCAmelCase ( self , __A , __A ) -> Tuple:
if self.act:
lowerCAmelCase_ :Optional[Any] = self.act(__A )
lowerCAmelCase_ :Optional[int] = self.linear(__A )
lowerCAmelCase_ :Optional[Any] = emb[:, :, None, None]
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = emb.chunk(2 , dim=1 )
lowerCAmelCase_ :Optional[int] = F.group_norm(__A , self.num_groups , eps=self.eps )
lowerCAmelCase_ :int = x * (1 + scale) + shift
return x
| 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/nllb-moe-54b': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "nllb-moe"
UpperCAmelCase_ :List[Any] = ["past_key_values"]
UpperCAmelCase_ :Any = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , __A=12_8112 , __A=1024 , __A=12 , __A=4096 , __A=16 , __A=12 , __A=4096 , __A=16 , __A=0.0_5 , __A=0.0_5 , __A=True , __A=True , __A="relu" , __A=1024 , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.0_2 , __A=2 , __A=True , __A=False , __A="float32" , __A=False , __A=128 , __A=64 , __A=4 , __A=4 , __A=0.0_0_1 , __A=0.0_0_1 , __A="all" , __A=False , __A=False , __A=1.0 , __A=0.2 , __A=1 , __A=0 , __A=2 , __A=False , **__A , ) -> List[str]:
lowerCAmelCase_ :Dict = vocab_size
lowerCAmelCase_ :Optional[int] = max_position_embeddings
lowerCAmelCase_ :Tuple = d_model
lowerCAmelCase_ :Tuple = encoder_ffn_dim
lowerCAmelCase_ :Optional[Any] = encoder_layers
lowerCAmelCase_ :str = encoder_attention_heads
lowerCAmelCase_ :List[str] = decoder_ffn_dim
lowerCAmelCase_ :Any = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Any = dropout
lowerCAmelCase_ :Dict = attention_dropout
lowerCAmelCase_ :int = activation_dropout
lowerCAmelCase_ :str = activation_function
lowerCAmelCase_ :Tuple = init_std
lowerCAmelCase_ :Any = encoder_layerdrop
lowerCAmelCase_ :List[Any] = decoder_layerdrop
lowerCAmelCase_ :int = use_cache
lowerCAmelCase_ :int = encoder_layers
lowerCAmelCase_ :Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ :List[str] = router_z_loss_coef
lowerCAmelCase_ :Any = router_aux_loss_coef
lowerCAmelCase_ :str = decoder_sparse_step
lowerCAmelCase_ :Union[str, Any] = encoder_sparse_step
lowerCAmelCase_ :str = num_experts
lowerCAmelCase_ :int = expert_capacity
lowerCAmelCase_ :Any = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
lowerCAmelCase_ :List[Any] = router_dtype
lowerCAmelCase_ :Optional[int] = router_ignore_padding_tokens
lowerCAmelCase_ :List[str] = batch_prioritized_routing
lowerCAmelCase_ :Optional[int] = second_expert_policy
lowerCAmelCase_ :Union[str, Any] = normalize_router_prob_before_dropping
lowerCAmelCase_ :Dict = moe_eval_capacity_token_fraction
lowerCAmelCase_ :List[Any] = moe_token_dropout
lowerCAmelCase_ :int = output_router_logits
super().__init__(
pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , **__A , )
| 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
__UpperCAmelCase = dataset.iloc[:, 1:2].values
__UpperCAmelCase = dataset.iloc[:, 2].values
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
__UpperCAmelCase = PolynomialFeatures(degree=4)
__UpperCAmelCase = poly_reg.fit_transform(X)
__UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
def _snake_case ( ) -> str:
'''simple docstring'''
plt.scatter(lowercase__ , lowercase__ , color="""red""" )
plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polynomial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
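# What PolynomialFeatures(degree=4) does to a single scalar feature
# above: it expands x into [1, x, x^2, x^3, x^4], so the "linear"
# regression is linear in the powers of the position level. Toy check:
import numpy as np
assert PolynomialFeatures(degree=4).fit_transform(np.array([[2.0]])).tolist() == [
    [1.0, 2.0, 4.0, 8.0, 16.0]
]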
| 1 | 1 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A=None , __A=None , __A=0 ) -> List[str]:
lowerCAmelCase_ :Tuple = 1.0 if scale is None else scale
lowerCAmelCase_ :str = 0.0 if loc is None else loc
super().__init__(__A , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__A )] )
@property
def __lowerCAmelCase ( self ) -> Tuple:
return self.base_dist.mean * self.scale + self.loc
@property
def __lowerCAmelCase ( self ) -> Tuple:
return self.base_dist.variance * self.scale**2
@property
def __lowerCAmelCase ( self ) -> Tuple:
return self.variance.sqrt()
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A , __A , __A , **__A ) -> None:
super().__init__(**__A )
lowerCAmelCase_ :Any = args_dim
lowerCAmelCase_ :Dict = nn.ModuleList([nn.Linear(__A , __A ) for dim in args_dim.values()] )
lowerCAmelCase_ :Any = domain_map
def __lowerCAmelCase ( self , __A ) -> Tuple[torch.Tensor]:
lowerCAmelCase_ :str = [proj(__A ) for proj in self.proj]
return self.domain_map(*__A )
class _SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self , __A ) -> List[Any]:
super().__init__()
lowerCAmelCase_ :str = function
def __lowerCAmelCase ( self , __A , *__A ) -> Optional[Any]:
return self.function(__A , *__A )
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :type
UpperCAmelCase_ :int
UpperCAmelCase_ :Dict[str, int]
def __init__( self , __A = 1 ) -> None:
lowerCAmelCase_ :List[str] = dim
lowerCAmelCase_ :Optional[int] = {k: dim * self.args_dim[k] for k in self.args_dim}
def __lowerCAmelCase ( self , __A ) -> List[str]:
if self.dim == 1:
return self.distribution_class(*__A )
else:
return Independent(self.distribution_class(*__A ) , 1 )
def __lowerCAmelCase ( self , __A , __A = None , __A = None , ) -> Distribution:
lowerCAmelCase_ :Optional[Any] = self._base_distribution(__A )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(__A , loc=__A , scale=__A , event_dim=self.event_dim )
@property
def __lowerCAmelCase ( self ) -> Tuple:
return () if self.dim == 1 else (self.dim,)
@property
def __lowerCAmelCase ( self ) -> int:
return len(self.event_shape )
@property
def __lowerCAmelCase ( self ) -> float:
return 0.0
def __lowerCAmelCase ( self , __A ) -> nn.Module:
return ParameterProjection(
in_features=__A , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def __lowerCAmelCase ( self , *__A ) -> Dict:
raise NotImplementedError()
@staticmethod
def __lowerCAmelCase ( __A ) -> torch.Tensor:
return (x + torch.sqrt(torch.square(__A ) + 4.0 )) / 2.0
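# The squareplus map above, (x + sqrt(x^2 + 4)) / 2, sends all of R to
# (0, inf); it is used to keep scale / df / total_count parameters
# positive (df additionally gets a +2.0 offset below so the variance of
# the Student-T exists). Unlike softplus it has a closed-form inverse,
# y - 1/y. Quick self-contained check:
_x = torch.linspace(-5.0, 5.0, steps=11)
_y = (_x + torch.sqrt(_x.square() + 4.0)) / 2.0
assert bool((_y > 0).all()) and torch.allclose(_y - 1.0 / _y, _x, atol=1e-5)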
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
UpperCAmelCase_ :type = StudentT
@classmethod
def __lowerCAmelCase ( cls , __A , __A , __A ) -> Dict:
lowerCAmelCase_ :List[str] = cls.squareplus(__A ).clamp_min(torch.finfo(scale.dtype ).eps )
lowerCAmelCase_ :Dict = 2.0 + cls.squareplus(__A )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict[str, int] = {"loc": 1, "scale": 1}
UpperCAmelCase_ :type = Normal
@classmethod
def __lowerCAmelCase ( cls , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = cls.squareplus(__A ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict[str, int] = {"total_count": 1, "logits": 1}
UpperCAmelCase_ :type = NegativeBinomial
@classmethod
def __lowerCAmelCase ( cls , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = cls.squareplus(__A )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def __lowerCAmelCase ( self , __A ) -> Distribution:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=__A , logits=__A )
else:
return Independent(self.distribution_class(total_count=__A , logits=__A ) , 1 )
def __lowerCAmelCase ( self , __A , __A = None , __A = None ) -> Distribution:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
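# Why "logits += scale.log()" implements scaling above: a negative
# binomial is a Gamma-Poisson mixture, and scaling the mixed Poisson
# rate by s multiplies the odds exp(logits) by s, i.e. adds log(s) to
# the logits while total_count stays fixed. Check on the mean, which is
# total_count * exp(logits):
_base = NegativeBinomial(total_count=torch.tensor(5.0), logits=torch.tensor(0.3))
_scaled = NegativeBinomial(total_count=torch.tensor(5.0), logits=torch.tensor(0.3) + torch.log(torch.tensor(2.0)))
assert torch.allclose(_scaled.mean, _base.mean * 2.0)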
| 1 |
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
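# Worked numbers for the relation sigma = n * q * mu behind the solver
# above (hypothetical semiconductor values): n = 1e23 m^-3 and
# mu = 0.14 m^2/(V*s) give sigma = 1e23 * 1.6021e-19 * 0.14 ~ 2.24e3 S/m,
# and dividing sigma by n * q recovers mu, which is what the "mobility"
# branch computes.
_q = 1.6021e-19  # same elementary charge constant as above
_n, _mu = 1e23, 0.14
_sigma = _n * _q * _mu
assert abs(_sigma / (_n * _q) - _mu) < 1e-12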
| 1 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
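# Minimal sketch of the lazy-import idea used by _LazyModule above
# (a hypothetical miniature, not the transformers implementation): the
# heavy torch/TF imports are deferred until an attribute is first
# accessed.
import importlib
import types

class _LazySketch(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        # e.g. {"DeiTModel": "transformers.models.deit.modeling_deit"}
        self._attr_to_module = attr_to_module
    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)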
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'microsoft/focalnet-tiny': 'https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( A__ , A__ ):
UpperCAmelCase_ :List[str] = "focalnet"
def __init__( self , __A=224 , __A=4 , __A=3 , __A=96 , __A=False , __A=[192, 384, 768, 768] , __A=[2, 2, 6, 2] , __A=[2, 2, 2, 2] , __A=[3, 3, 3, 3] , __A="gelu" , __A=4.0 , __A=0.0 , __A=0.1 , __A=False , __A=1E-4 , __A=False , __A=False , __A=False , __A=0.0_2 , __A=1E-5 , __A=32 , __A=None , __A=None , **__A , ) -> List[Any]:
super().__init__(**__A )
lowerCAmelCase_ :List[str] = image_size
lowerCAmelCase_ :str = patch_size
lowerCAmelCase_ :Optional[Any] = num_channels
lowerCAmelCase_ :Dict = embed_dim
lowerCAmelCase_ :Dict = use_conv_embed
lowerCAmelCase_ :List[str] = hidden_sizes
lowerCAmelCase_ :List[Any] = depths
lowerCAmelCase_ :Tuple = focal_levels
lowerCAmelCase_ :Union[str, Any] = focal_windows
lowerCAmelCase_ :Any = hidden_act
lowerCAmelCase_ :Dict = mlp_ratio
lowerCAmelCase_ :Tuple = hidden_dropout_prob
lowerCAmelCase_ :str = drop_path_rate
lowerCAmelCase_ :Dict = use_layerscale
lowerCAmelCase_ :Optional[int] = layerscale_value
lowerCAmelCase_ :int = use_post_layernorm
lowerCAmelCase_ :int = use_post_layernorm_in_modulation
lowerCAmelCase_ :List[Any] = normalize_modulator
lowerCAmelCase_ :str = initializer_range
lowerCAmelCase_ :int = layer_norm_eps
lowerCAmelCase_ :Tuple = encoder_stride
lowerCAmelCase_ :str = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = get_aligned_output_features_output_indices(
out_features=__A , out_indices=__A , stage_names=self.stage_names )
| 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase_ :List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase_ :Union[str, Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
lowerCAmelCase_ :str = item.ha.text
lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""]
lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
lowerCAmelCase_ :int = """Not available"""
try:
lowerCAmelCase_ :str = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
lowerCAmelCase_ :Optional[Any] = """"""
try:
lowerCAmelCase_ :str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_0_0 )
except ValueError:
lowerCAmelCase_ :Union[str, Any] = float("""nan""" )
except AttributeError:
pass
lowerCAmelCase_ :Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase_ :List[Any] = """ """
lowerCAmelCase_ :Tuple = """ """
data_frame.index += 1
return data_frame
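# Sanity check of the discount formula above, (MRP - price) / MRP * 100,
# on hypothetical rupee strings in the same format the scraper parses:
_mrp, _price = "₹1,000", "₹750"
_clean = lambda s: float(s.strip("₹").replace(",", ""))
assert (_clean(_mrp) - _clean(_price)) / _clean(_mrp) * 100 == 25.0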
if __name__ == "__main__":
__UpperCAmelCase = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 1 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
| 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 1 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def _snake_case ( lowercase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
return 1.0 / (1.0 + np.exp(-_outputs ))
def _snake_case ( lowercase__ : List[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = np.max(_outputs , axis=-1 , keepdims=lowercase__ )
lowerCAmelCase_ :List[Any] = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase__ )
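# The max subtraction in the helper above is the standard softmax
# stability trick: softmax(z) == softmax(z - c) for any constant c, and
# c = max(z) keeps np.exp from overflowing. Check with large logits:
_z = np.array([[1000.0, 1001.0, 1002.0]])
_e = np.exp(_z - _z.max(axis=-1, keepdims=True))
_p = _e / _e.sum(axis=-1, keepdims=True)
assert not np.isnan(_p).any() and np.isclose(_p.sum(), 1.0)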
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Tuple = "sigmoid"
UpperCAmelCase_ :Dict = "softmax"
UpperCAmelCase_ :Tuple = "none"
@add_end_docstrings(
A__ , r"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , )
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = False
UpperCAmelCase_ :List[Any] = ClassificationFunction.NONE
def __init__( self , **__A ) -> List[str]:
super().__init__(**__A )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def __lowerCAmelCase ( self , __A=None , __A=None , __A="" , **__A ) -> Tuple:
# Using "" as default argument because we're going to use `top_k=None` in user code to declare
# "No top_k"
lowerCAmelCase_ :Tuple = tokenizer_kwargs
lowerCAmelCase_ :List[str] = {}
if hasattr(self.model.config , """return_all_scores""" ) and return_all_scores is None:
lowerCAmelCase_ :int = self.model.config.return_all_scores
if isinstance(__A , __A ) or top_k is None:
lowerCAmelCase_ :Any = top_k
lowerCAmelCase_ :Optional[int] = False
elif return_all_scores is not None:
warnings.warn(
"""`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"""
""" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.""" , __A , )
if return_all_scores:
lowerCAmelCase_ :Optional[Any] = None
else:
lowerCAmelCase_ :Dict = 1
if isinstance(__A , __A ):
lowerCAmelCase_ :Any = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
lowerCAmelCase_ :Optional[Any] = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *__A , **__A ) -> str:
lowerCAmelCase_ :Dict = super().__call__(*__A , **__A )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
lowerCAmelCase_ :Optional[int] = """top_k""" not in kwargs
if isinstance(args[0] , __A ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def __lowerCAmelCase ( self , __A , **__A ) -> Dict[str, GenericTensor]:
lowerCAmelCase_ :List[str] = self.framework
if isinstance(__A , __A ):
return self.tokenizer(**__A , return_tensors=__A , **__A )
elif isinstance(__A , __A ) and len(__A ) == 1 and isinstance(inputs[0] , __A ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__A , **__A )
elif isinstance(__A , __A ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"""The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"""
""" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.""" )
return self.tokenizer(__A , return_tensors=__A , **__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.model(**__A )
def __lowerCAmelCase ( self , __A , __A=None , __A=1 , __A=True ) -> Union[str, Any]:
# `_legacy` is used to determine if we're running the naked pipeline and in backward
# compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
# the more natural result containing the list.
# Default value before `set_parameters`
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
lowerCAmelCase_ :List[str] = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
lowerCAmelCase_ :Any = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , """function_to_apply""" ) and function_to_apply is None:
lowerCAmelCase_ :Optional[int] = self.model.config.function_to_apply
else:
lowerCAmelCase_ :List[Any] = ClassificationFunction.NONE
lowerCAmelCase_ :Any = model_outputs["""logits"""][0]
lowerCAmelCase_ :Optional[Any] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
lowerCAmelCase_ :Optional[int] = sigmoid(__A )
elif function_to_apply == ClassificationFunction.SOFTMAX:
lowerCAmelCase_ :Optional[int] = softmax(__A )
elif function_to_apply == ClassificationFunction.NONE:
lowerCAmelCase_ :List[Any] = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
lowerCAmelCase_ :int = [
{"""label""": self.model.config.idalabel[i], """score""": score.item()} for i, score in enumerate(__A )
]
if not _legacy:
dict_scores.sort(key=lambda __A : x["score"] , reverse=__A )
if top_k is not None:
lowerCAmelCase_ :List[str] = dict_scores[:top_k]
return dict_scores
| 1 |
"""simple docstring"""
import os
from math import logaa
def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase_ :float = 0
lowerCAmelCase_ :Union[str, Any] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) )
if x * logaa(lowercase__ ) > largest:
lowerCAmelCase_ :Any = x * logaa(lowercase__ )
lowerCAmelCase_ :List[Any] = i + 1
return result
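# The comparison above relies on log10(b**x) = x * log10(b): log10 is
# monotonic, so ranking x * log10(b) ranks the huge powers b**x without
# ever materializing them. Tiny check:
from math import log10
assert 3 * log10(10) > 2 * log10(31)  # 10**3 = 1000 > 31**2 = 961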
if __name__ == "__main__":
print(solution())
| 1 | 1 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__UpperCAmelCase = 5_00_03
__UpperCAmelCase = 5_00_02
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Dict = PLBartTokenizer
UpperCAmelCase_ :Any = None
UpperCAmelCase_ :List[Any] = False
def __lowerCAmelCase ( self ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ :List[str] = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Tuple = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A )
lowerCAmelCase_ :int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase_ :Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :str = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.vocab_size
lowerCAmelCase_ :List[Any] = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 4 , __A )]
self.assertListEqual(__A , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
lowerCAmelCase_ :Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCAmelCase_ :int = tokenizer(__A ).input_ids
self.assertEqual(
tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = PLBartTokenizer(__A , language_codes="""multi""" , keep_accents=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :Tuple = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCAmelCase_ :Optional[int] = tokenizer.vocab_size
lowerCAmelCase_ :Optional[int] = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 7 , __A )]
self.assertListEqual(
__A , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
lowerCAmelCase_ :Optional[int] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCAmelCase_ :str = tokenizer(__A ).input_ids
self.assertEqual(
tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
UpperCAmelCase_ :Union[str, Any] = "uclanlp/plbart-python-en_XX"
UpperCAmelCase_ :Any = [
"def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
"def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
]
UpperCAmelCase_ :Any = [
"Returns the maximum value of a b c.",
"Sums the values of a b c.",
]
UpperCAmelCase_ :Any = [
134,
5452,
3_3460,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
988,
20,
3_3456,
19,
3_3456,
771,
39,
4258,
889,
3318,
3_3441,
3_3463,
3_3465,
3_3463,
3_3449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def __lowerCAmelCase ( cls ) -> str:
lowerCAmelCase_ :PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
lowerCAmelCase_ :Optional[int] = 1
return cls
def __lowerCAmelCase ( self ) -> Any:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 5_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 5_0002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 5_0003 )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertIn(__A , self.tokenizer.all_special_ids )
lowerCAmelCase_ :Optional[Any] = [EN_CODE, 9037, 3_3442, 57, 752, 153, 14, 56, 18, 9, 2]
lowerCAmelCase_ :List[str] = self.tokenizer.decode(__A , skip_special_tokens=__A )
lowerCAmelCase_ :List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__A )
self.assertEqual(__A , __A )
self.assertNotIn(self.tokenizer.eos_token , __A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
        self.assertIsInstance(src_text[0] , str )
lowerCAmelCase_ :Any = 10
lowerCAmelCase_ :Any = self.tokenizer(__A , max_length=__A , truncation=__A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __A )
self.assertEqual(len(__A ) , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [5_0004, 5_0001] )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Tuple = tempfile.mkdtemp()
lowerCAmelCase_ :str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__A )
lowerCAmelCase_ :Union[str, Any] = PLBartTokenizer.from_pretrained(__A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __A )
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__A , return_tensors="""pt""" )
lowerCAmelCase_ :str = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __A )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__A , truncation=__A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
lowerCAmelCase_ :Any = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(__A , __A )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowerCAmelCase_ :Any = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer(self.src_text , padding=__A , truncation=__A , max_length=3 , return_tensors="""pt""" )
lowerCAmelCase_ :int = self.tokenizer(
text_target=self.tgt_text , padding=__A , truncation=__A , max_length=10 , return_tensors="""pt""" )
lowerCAmelCase_ :Dict = targets["""input_ids"""]
lowerCAmelCase_ :Tuple = shift_tokens_right(__A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
self.assertEqual(
nested_simplify(__A ) , {
# A, test, EOS, en_XX
"""input_ids""": [[150, 242, 2, 5_0003]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_0001,
} , )
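# Hedged sketch (added, not part of the original test file): the wrap-around
# shift the assertions above rely on. PLBart stores the language code as the
# last label token, and building decoder_input_ids moves it to the front. For
# an unpadded row this is a plain roll; the real shift_tokens_right helper in
# transformers also has to handle padding.
import torch
labels = torch.tensor([[9037, 57, 752, 2, 50003]])  # tokens, EOS, language code
decoder_input_ids = torch.roll(labels, shifts=1, dims=1)
assert decoder_input_ids.tolist() == [[50003, 9037, 57, 752, 2]]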
| 1 |
"""simple docstring"""
import itertools
import math
def _snake_case ( lowercase__ : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _snake_case ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = 2
while True:
if is_prime(lowercase__ ):
yield num
num += 1
def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int:
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
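# Hedged cross-check (added): the 6k +/- 1 trial division above is sound
# because every prime p > 3 satisfies p % 6 in (1, 5), so stepping the divisor
# by 6 and testing both i and i + 2 covers all candidate factors. A brute-force
# reference for small n (the helper above is invoked as `is_prime` at its call
# site):
def _brute_force_is_prime(n: int) -> bool:
    return n > 1 and all(n % d != 0 for d in range(2, n))
# e.g. [n for n in range(2, 100) if is_prime(n)] should equal
#      [n for n in range(2, 100) if _brute_force_is_prime(n)]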
| 1 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , __A , __A=7 , __A=3 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , ) -> int:
lowerCAmelCase_ :Optional[Any] = size if size is not None else {"""shortest_edge""": 18}
lowerCAmelCase_ :str = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase_ :Tuple = parent
lowerCAmelCase_ :int = batch_size
lowerCAmelCase_ :Union[str, Any] = num_channels
lowerCAmelCase_ :Dict = image_size
lowerCAmelCase_ :Optional[int] = min_resolution
lowerCAmelCase_ :Union[str, Any] = max_resolution
lowerCAmelCase_ :Any = do_resize
lowerCAmelCase_ :str = size
lowerCAmelCase_ :Tuple = do_center_crop
lowerCAmelCase_ :Union[str, Any] = crop_size
lowerCAmelCase_ :str = do_normalize
lowerCAmelCase_ :int = image_mean
lowerCAmelCase_ :Tuple = image_std
def __lowerCAmelCase ( self ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = LevitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = LevitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_center_crop""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
lowerCAmelCase_ :Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __lowerCAmelCase ( self ) -> List[str]:
pass
def __lowerCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase_ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
lowerCAmelCase_ :Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ :Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCAmelCase ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
lowerCAmelCase_ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ :Optional[int] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCAmelCase ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase_ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
lowerCAmelCase_ :Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ :int = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
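# Hedged usage sketch (added): what the tests above exercise end to end,
# assuming the tester defaults; requires Pillow and torch.
from PIL import Image
from transformers import LevitImageProcessor
_processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
_batch = [Image.new("RGB", (32, 48)) for _ in range(2)]
_pixel_values = _processor(_batch, return_tensors="pt").pixel_values
assert _pixel_values.shape == (2, 3, 18, 18)  # batch, channels, crop height, crop width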
| 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : int = 5_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
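# Hedged cross-check (added): the triple loop above fills the same recurrence
# as this memoised form -- a row of length n holds either no block, or a block
# of size >= 3 at some offset followed by at least one separating unit. The
# default length of 50 suggests Project Euler 114, whose statement gives 17
# arrangements for a row of length 7.
from functools import lru_cache
@lru_cache(maxsize=None)
def _ways(n: int) -> int:
    if n < 3:
        return 1  # only the empty arrangement fits
    total = 1
    for start in range(n - 2):
        for size in range(3, n - start + 1):
            rest = n - start - size
            total += 1 if rest == 0 else _ways(rest - 1)
    return total
assert _ways(7) == 17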
| 1 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = RoCBertTokenizer
UpperCAmelCase_ :Optional[Any] = None
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :int = True
UpperCAmelCase_ :Tuple = filter_non_english
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
lowerCAmelCase_ :Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """你""", """好""", """是""", """谁""", """a""", """b""", """c""", """d"""]
lowerCAmelCase_ :Dict = {}
lowerCAmelCase_ :Union[str, Any] = {}
for i, value in enumerate(__A ):
lowerCAmelCase_ :List[Any] = i
lowerCAmelCase_ :List[str] = i
lowerCAmelCase_ :List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_shape_file"""] )
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""word_pronunciation_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.word_shape_file , """w""" , encoding="""utf-8""" ) as word_shape_writer:
json.dump(__A , __A , ensure_ascii=__A )
with open(self.word_pronunciation_file , """w""" , encoding="""utf-8""" ) as word_pronunciation_writer:
json.dump(__A , __A , ensure_ascii=__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase_ :Dict = tokenizer.tokenize("""你好[SEP]你是谁""" )
self.assertListEqual(__A , ["""你""", """好""", """[SEP]""", """你""", """是""", """谁"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__A ) , [5, 6, 2, 5, 7, 8] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = RoCBertBasicTokenizer(do_lower_case=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = RoCBertBasicTokenizer(do_lower_case=__A , strip_accents=__A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :int = RoCBertBasicTokenizer(do_lower_case=__A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
lowerCAmelCase_ :Any = {}
for i, token in enumerate(__A ):
lowerCAmelCase_ :int = i
lowerCAmelCase_ :Union[str, Any] = RoCBertWordpieceTokenizer(vocab=__A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def __lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
if self.test_rust_tokenizer:
lowerCAmelCase_ :List[str] = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(__A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
def __lowerCAmelCase ( self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
lowerCAmelCase_ :List[Any] = tokenizer_r.encode_plus(
__A , return_attention_mask=__A , return_token_type_ids=__A , return_offsets_mapping=__A , add_special_tokens=__A , )
lowerCAmelCase_ :List[str] = tokenizer_r.do_lower_case if hasattr(__A , """do_lower_case""" ) else False
lowerCAmelCase_ :List[str] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Tuple = ["""的""", """人""", """有"""]
lowerCAmelCase_ :Optional[Any] = """""".join(__A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ :str = True
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :str = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Optional[Any] = tokenizer_p.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer_r.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :List[Any] = tokenizer_r.convert_ids_to_tokens(__A )
lowerCAmelCase_ :List[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
lowerCAmelCase_ :Union[str, Any] = False
lowerCAmelCase_ :Tuple = self.rust_tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :str = self.tokenizer_class.from_pretrained(__A , **__A )
lowerCAmelCase_ :Dict = tokenizer_r.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :Tuple = tokenizer_p.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :Dict = tokenizer_r.convert_ids_to_tokens(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer_p.convert_ids_to_tokens(__A )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCAmelCase_ :int = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__A )
]
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase_ :Tuple = tokenizer.encode("""你好""" , add_special_tokens=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer.encode("""你是谁""" , add_special_tokens=__A )
lowerCAmelCase_ :Dict = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizers(do_lower_case=__A )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase_ :str = """你好,你是谁"""
lowerCAmelCase_ :List[str] = tokenizer.tokenize(__A )
lowerCAmelCase_ :Any = tokenizer.convert_tokens_to_ids(__A )
lowerCAmelCase_ :Tuple = tokenizer.convert_tokens_to_shape_ids(__A )
lowerCAmelCase_ :Dict = tokenizer.convert_tokens_to_pronunciation_ids(__A )
lowerCAmelCase_ :List[Any] = tokenizer.prepare_for_model(
__A , __A , __A , add_special_tokens=__A )
lowerCAmelCase_ :Union[str, Any] = tokenizer.encode_plus(__A , add_special_tokens=__A )
self.assertEqual(__A , __A )
| 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Tuple = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__A ):
if isinstance(__A , torch.nn.Convad ):
                torch.nn.init.normal_(__A.weight )
                __A.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Optional[Any] = 2
lowerCAmelCase_ :Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
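# Hedged sketch (added): the control_guidance_start / control_guidance_end
# arguments tested in the mixin above gate each ControlNet to a fraction of
# the denoising schedule. The pipeline builds a per-step keep list, but the
# decision reduces to a window test roughly like this:
def _controlnet_active(step: int, num_steps: int, start: float, end: float) -> bool:
    progress = step / max(num_steps - 1, 1)
    return start <= progress <= end
assert _controlnet_active(0, 50, 0.0, 1.0)
assert not _controlnet_active(40, 50, 0.1, 0.2)  # progress ~0.82, outside the window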
| 1 | 1 |
"""simple docstring"""
from string import ascii_uppercase
__UpperCAmelCase = {str(ord(c) - 55): c for c in ascii_uppercase}
def _snake_case ( lowercase__ : int , lowercase__ : int ) -> str:
'''simple docstring'''
    if isinstance(lowercase__ , float ):
        raise TypeError("""int() can't convert non-string with explicit base""" )
    if num < 0:
        raise ValueError("""parameter must be positive int""" )
    if isinstance(lowercase__ , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if isinstance(lowercase__ , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
if base in (0, 1):
raise ValueError("""base must be >= 2""" )
if base > 3_6:
raise ValueError("""base must be <= 36""" )
lowerCAmelCase_ :Any = """"""
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Any = 0
while div != 1:
lowerCAmelCase_ , lowerCAmelCase_ :Any = divmod(lowercase__ , lowercase__ )
if base >= 1_1 and 9 < mod < 3_6:
lowerCAmelCase_ :List[str] = ALPHABET_VALUES[str(lowercase__ )]
else:
lowerCAmelCase_ :Dict = str(lowercase__ )
new_value += actual_value
lowerCAmelCase_ :int = num // base
lowerCAmelCase_ :Union[str, Any] = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
            new_value += str(div )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
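# Hedged companion sketch (added): the digit-by-digit idea above as a compact
# standalone converter over the same 0-9A-Z alphabet, handy for cross-checking:
import string
def _to_base(num: int, base: int) -> str:
    digits = string.digits + string.ascii_uppercase
    out = ""
    while num > 0:
        num, rem = divmod(num, base)
        out = digits[rem] + out
    return out or "0"
assert _to_base(255, 16) == "FF" and int(_to_base(255, 16), 16) == 255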
| 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
            # Only beam search is supported for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
        # token tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
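# Hedged sketch (added): the length-normalised ranking used by generate_beam
# above, in miniature -- dividing summed log-probabilities by sequence length
# keeps longer captions from being unfairly penalised.
import torch
_beam_scores = torch.tensor([-4.2, -3.9, -6.0])  # summed log-probs per beam
_beam_lengths = torch.tensor([7.0, 6.0, 11.0])   # tokens generated per beam
_order = (_beam_scores / _beam_lengths).argsort(descending=True)
# _order[0] indexes the beam with the best average per-token log-probability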
| 1 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '▁'
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__UpperCAmelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__UpperCAmelCase = {'vinai/bartpho-syllable': 10_24}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase_ :Dict = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :List[str] = ["input_ids", "attention_mask"]
def __init__( self , __A , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A = None , **__A , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        lowerCAmelCase_ :Dict = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , str ) else mask_token
lowerCAmelCase_ :Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
lowerCAmelCase_ :Any = vocab_file
lowerCAmelCase_ :Optional[int] = monolingual_vocab_file
lowerCAmelCase_ :List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCAmelCase_ :Tuple = {}
lowerCAmelCase_ :List[str] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__A ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase_ :List[str] = cnt
cnt += 1
with open(__A , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
lowerCAmelCase_ :int = line.strip().split()[0]
lowerCAmelCase_ :int = len(self.fairseq_tokens_to_ids )
if str(__A ) not in self.fairseq_tokens_to_ids:
lowerCAmelCase_ :Union[str, Any] = len(self.fairseq_tokens_to_ids )
lowerCAmelCase_ :List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> Any:
lowerCAmelCase_ :Optional[Any] = self.__dict__.copy()
lowerCAmelCase_ :str = None
lowerCAmelCase_ :str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __A ) -> Any:
lowerCAmelCase_ :Optional[int] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ :int = {}
lowerCAmelCase_ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ :Tuple = [self.cls_token_id]
lowerCAmelCase_ :Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :Optional[int] = [self.sep_token_id]
lowerCAmelCase_ :List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return len(self.fairseq_ids_to_tokens )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :str = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self , __A ) -> List[str]:
return self.sp_model.encode(__A , out_type=__A )
def __lowerCAmelCase ( self , __A ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __lowerCAmelCase ( self , __A ) -> List[str]:
return self.fairseq_ids_to_tokens[index]
def __lowerCAmelCase ( self , __A ) -> List[Any]:
lowerCAmelCase_ :int = """""".join(__A ).replace(__A , """ """ ).strip()
return out_string
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase_ :List[Any] = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :Tuple = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
lowerCAmelCase_ :int = self.sp_model.serialized_model_proto()
fi.write(__A )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__A ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __A )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__A , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(__A )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
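# Hedged sketch (added): the reduced-vocabulary trick above in miniature --
# sentencepiece pieces are remapped onto a smaller fairseq dict, and anything
# outside it falls back to the unknown id (names here are illustrative):
def _remap(piece: str, fairseq_tokens_to_ids: dict, unk_id: int) -> int:
    return fairseq_tokens_to_ids.get(piece, unk_id)
_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "▁xin": 4, "chào": 5}
assert [_remap(p, _vocab, 3) for p in ["▁xin", "chào", "▁missing"]] == [4, 5, 3]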
| 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(__A , dict ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
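# Hedged usage sketch (added): the attribute_map declared above lets generic
# code read hidden_size / num_attention_heads even though DETR stores them as
# d_model / encoder_attention_heads.
from transformers import DetrConfig
_config = DetrConfig(d_model=256, encoder_attention_heads=8)
assert _config.hidden_size == 256
assert _config.num_attention_heads == 8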
| 1 | 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :list[list[str]] = [[] for _ in range(lowercase__ )]
lowerCAmelCase_ :List[Any] = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(lowercase__ ) <= key:
return input_string
for position, character in enumerate(lowercase__ ):
lowerCAmelCase_ :Dict = position % (lowest * 2) # puts it in bounds
lowerCAmelCase_ :Dict = min(lowercase__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(lowercase__ )
lowerCAmelCase_ :List[str] = ["""""".join(lowercase__ ) for row in temp_grid]
lowerCAmelCase_ :Dict = """""".join(lowercase__ )
return output_string
def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :Union[str, Any] = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
lowerCAmelCase_ :list[list[str]] = [[] for _ in range(lowercase__ )] # generates template
for position in range(len(lowercase__ ) ):
lowerCAmelCase_ :List[str] = position % (lowest * 2) # puts it in bounds
lowerCAmelCase_ :Optional[int] = min(lowercase__ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
lowerCAmelCase_ :int = 0
for row in temp_grid: # fills in the characters
lowerCAmelCase_ :List[Any] = input_string[counter : counter + len(lowercase__ )]
grid.append(list(lowercase__ ) )
counter += len(lowercase__ )
lowerCAmelCase_ :str = """""" # reads as zigzag
for position in range(len(lowercase__ ) ):
lowerCAmelCase_ :List[str] = position % (lowest * 2) # puts it in bounds
lowerCAmelCase_ :List[Any] = min(lowercase__ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def _snake_case ( lowercase__ : str ) -> dict[int, str]:
'''simple docstring'''
lowerCAmelCase_ :Any = {}
for key_guess in range(1 , len(lowercase__ ) ): # tries every key
lowerCAmelCase_ :List[str] = decrypt(lowercase__ , lowercase__ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
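# Hedged illustration (added): the zigzag row pattern both encrypt and decrypt
# above depend on, shown standalone (assumes key >= 2; key == 1 is special-cased
# by the cipher and would divide by zero here):
def _zigzag_rows(length: int, key: int) -> list:
    lowest = key - 1
    rows = []
    for position in range(length):
        num = position % (lowest * 2)  # puts it in bounds
        rows.append(min(num, lowest * 2 - num))  # creates zigzag pattern
    return rows
assert _zigzag_rows(9, 3) == [0, 1, 2, 1, 0, 1, 2, 1, 0]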
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
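# For reference, a stripped-down, runnable sketch of the lazy-import pattern
# registered above; the class below is illustrative, the real implementation
# lives in transformers.utils as _LazyModule.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)  # the heavy import happens only on first access
# e.g. _TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'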
| 1 | 1 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=7 , __A=False , __A=True , __A=False , __A=True , __A=33 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> Dict:
lowerCAmelCase_ :Dict = parent
lowerCAmelCase_ :List[Any] = batch_size
lowerCAmelCase_ :Tuple = seq_length
lowerCAmelCase_ :Optional[Any] = is_training
lowerCAmelCase_ :str = use_input_mask
lowerCAmelCase_ :Optional[int] = use_token_type_ids
lowerCAmelCase_ :List[str] = use_labels
lowerCAmelCase_ :Union[str, Any] = vocab_size
lowerCAmelCase_ :List[Any] = hidden_size
lowerCAmelCase_ :Optional[Any] = num_hidden_layers
lowerCAmelCase_ :Dict = num_attention_heads
lowerCAmelCase_ :Dict = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :str = hidden_dropout_prob
lowerCAmelCase_ :str = attention_probs_dropout_prob
lowerCAmelCase_ :Dict = max_position_embeddings
lowerCAmelCase_ :Optional[int] = type_vocab_size
lowerCAmelCase_ :Optional[int] = type_sequence_label_size
lowerCAmelCase_ :List[Any] = initializer_range
lowerCAmelCase_ :Union[str, Any] = num_labels
lowerCAmelCase_ :int = num_choices
lowerCAmelCase_ :Union[str, Any] = scope
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase_ :Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Optional[Any] = None
lowerCAmelCase_ :List[Any] = None
lowerCAmelCase_ :Any = None
if self.use_labels:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> Tuple:
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> str:
lowerCAmelCase_ :str = EsmModel(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )
lowerCAmelCase_ :Any = model(__A )
lowerCAmelCase_ :List[Any] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> List[str]:
lowerCAmelCase_ :List[Any] = EsmForMaskedLM(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Optional[int] = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> int:
lowerCAmelCase_ :List[Any] = self.num_labels
lowerCAmelCase_ :Optional[int] = EsmForTokenClassification(config=__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Any = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowerCAmelCase_ :Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Dict = False
UpperCAmelCase_ :List[Any] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase_ :Union[str, Any] = ()
UpperCAmelCase_ :List[str] = (
{
"feature-extraction": EsmModel,
"fill-mask": EsmForMaskedLM,
"text-classification": EsmForSequenceClassification,
"token-classification": EsmForTokenClassification,
"zero-shot": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :Any = True
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = EsmModelTester(self )
lowerCAmelCase_ :List[str] = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ :List[Any] = type
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :Any = EsmModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase_ :Dict = EsmEmbeddings(config=__A )
lowerCAmelCase_ :Optional[Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCAmelCase_ :str = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCAmelCase_ :List[Any] = create_position_ids_from_input_ids(__A , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__A , __A ) ) )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs()[0]
lowerCAmelCase_ :List[str] = EsmEmbeddings(config=__A )
lowerCAmelCase_ :int = torch.empty(2 , 4 , 30 )
lowerCAmelCase_ :Tuple = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowerCAmelCase_ :Dict = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowerCAmelCase_ :List[str] = embeddings.create_position_ids_from_inputs_embeds(__A )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__A , __A ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def __lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def __lowerCAmelCase ( self ) -> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCAmelCase ( self ) -> str:
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ ):
@slow
def __lowerCAmelCase ( self ) -> List[str]:
with torch.no_grad():
lowerCAmelCase_ :int = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCAmelCase_ :List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ :Optional[int] = model(__A )[0]
lowerCAmelCase_ :Optional[Any] = 33
lowerCAmelCase_ :Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __A )
lowerCAmelCase_ :List[str] = torch.tensor(
[[[8.9_2_1_5, -1_0.5_8_9_8, -6.4_6_7_1], [-6.3_9_6_7, -1_3.9_1_1_4, -1.1_2_1_2], [-7.7_8_1_2, -1_3.9_5_1_6, -3.7_4_0_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
@slow
def __lowerCAmelCase ( self ) -> List[str]:
with torch.no_grad():
lowerCAmelCase_ :Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
lowerCAmelCase_ :Dict = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase_ :Union[str, Any] = model(__A )[0]
# compare the actual values for a slice.
lowerCAmelCase_ :List[str] = torch.tensor(
[[[0.1_4_4_4, 0.5_4_1_3, 0.3_2_4_8], [0.3_0_3_4, 0.0_0_5_3, 0.3_1_0_8], [0.3_2_2_8, -0.2_4_9_9, 0.3_4_1_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1E-4 ) )
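# The padding-aware position ids exercised by the embedding tests above follow
# a simple rule: padding positions keep padding_idx, real tokens count up from
# padding_idx + 1. A self-contained sketch of that rule (assumed to mirror
# create_position_ids_from_input_ids):
import torch
def _demo_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    return torch.cumsum(mask, dim=1) * mask + padding_idx
# [[12, 31, 13, pad]] with padding_idx=1 -> [[2, 3, 4, 1]], as in the test.
assert _demo_position_ids(torch.tensor([[12, 31, 13, 1]]), 1).tolist() == [[2, 3, 4, 1]]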
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
UpperCAmelCase_ :List[Any] = StableDiffusionLDMaDPipeline
UpperCAmelCase_ :Dict = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase_ :Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase_ :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> int:
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowerCAmelCase_ :Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :Tuple = CLIPTextModel(__A )
lowerCAmelCase_ :Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> int:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :List[str] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :str = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :int = self.get_dummy_components()
lowerCAmelCase_ :Tuple = StableDiffusionLDMaDPipeline(**__A )
lowerCAmelCase_ :Tuple = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = output.rgb, output.depth
lowerCAmelCase_ :List[str] = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ :int = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ :Any = np.array(
[0.3_7_3_3_8_1_7_6, 0.7_0_2_4_7, 0.7_4_2_0_3_1_9_3, 0.5_1_6_4_3_6_0_4, 0.5_8_2_5_6_7_9_3, 0.6_0_9_3_2_1_3_6, 0.4_1_8_1_0_9_5, 0.4_8_3_5_5_8_7_7, 0.4_6_5_3_5_2_6_2] )
lowerCAmelCase_ :Union[str, Any] = np.array([1_0_3.4_6_7_2_7, 8_5.8_1_2_0_0_4, 8_7.8_4_9_2_3_6] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = StableDiffusionLDMaDPipeline(**__A )
lowerCAmelCase_ :int = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = 3 * [inputs["""prompt"""]]
# forward
lowerCAmelCase_ :Any = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = output.rgb, output.depth
lowerCAmelCase_ :int = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ :Optional[Any] = depth_slice_a[0, -3:, -1]
lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[Any] = 3 * [inputs.pop("""prompt""" )]
lowerCAmelCase_ :Dict = ldmad_pipe.tokenizer(
__A , padding="""max_length""" , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=__A , return_tensors="""pt""" , )
lowerCAmelCase_ :Optional[int] = text_inputs["""input_ids"""].to(__A )
lowerCAmelCase_ :List[Any] = ldmad_pipe.text_encoder(__A )[0]
lowerCAmelCase_ :Any = prompt_embeds
# forward
lowerCAmelCase_ :Optional[int] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = output.rgb, output.depth
lowerCAmelCase_ :Tuple = rgb_slice_a[0, -3:, -3:, -1]
lowerCAmelCase_ :List[Any] = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :Tuple = self.get_dummy_components()
lowerCAmelCase_ :List[Any] = PNDMScheduler(skip_prk_steps=__A )
lowerCAmelCase_ :Optional[Any] = StableDiffusionLDMaDPipeline(**__A )
lowerCAmelCase_ :str = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :str = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[Any] = """french fries"""
lowerCAmelCase_ :int = ldmad_pipe(**__A , negative_prompt=__A )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = output.rgb, output.depth
lowerCAmelCase_ :int = rgb[0, -3:, -3:, -1]
lowerCAmelCase_ :Dict = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
lowerCAmelCase_ :Tuple = np.array(
[0.3_7_0_4_4, 0.7_1_8_1_1_5_0_3, 0.7_2_2_3_2_5_1, 0.4_8_6_0_3_6_7_5, 0.5_6_3_8_3_9_1, 0.6_3_6_4_9_4_8, 0.4_2_8_3_3_7_0_4, 0.4_9_0_1_3_1_5, 0.4_7_9_2_6_2_1_7] )
lowerCAmelCase_ :Union[str, Any] = np.array([1_0_7.8_4_7_3_8, 8_4.6_2_8_0_2, 8_9.9_6_2_1_3_5] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , __A , __A="cpu" , __A=torch.floataa , __A=0 ) -> Dict:
lowerCAmelCase_ :List[str] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[str] = np.random.RandomState(__A ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ :Any = torch.from_numpy(__A ).to(device=__A , dtype=__A )
lowerCAmelCase_ :Any = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Union[str, Any] = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" )
lowerCAmelCase_ :Union[str, Any] = ldmad_pipe.to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = self.get_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = output.rgb, output.depth
lowerCAmelCase_ :List[str] = rgb[0, -3:, -3:, -1].flatten()
lowerCAmelCase_ :Any = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
lowerCAmelCase_ :int = np.array(
[0.5_3_8_0_5_4_6_5, 0.5_6_7_0_7_3_0_5, 0.5_4_8_6_5_1_5, 0.5_7_0_1_2_2_3_6, 0.5_8_1_4_5_1_1, 0.5_6_2_5_3_4_8_7, 0.5_4_8_4_3_0_1_4, 0.5_5_0_9_2_2_6_3, 0.6_4_5_9_7_0_6] )
lowerCAmelCase_ :str = np.array(
[0.9_2_6_3_7_8_1, 0.6_6_7_8_6_7_2, 0.5_4_8_6_5_1_5, 0.9_2_2_0_2_1_4_5, 0.6_7_8_3_1_1_3_5, 0.5_6_2_5_3_4_8_7, 0.9_2_4_1_6_9_4, 0.7_5_5_1_4_7_8, 0.6_4_5_9_7_0_6] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self , __A , __A="cpu" , __A=torch.floataa , __A=0 ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :int = np.random.RandomState(__A ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ :List[Any] = torch.from_numpy(__A ).to(device=__A , dtype=__A )
lowerCAmelCase_ :Optional[Any] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d""" ).to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Dict = self.get_inputs(__A )
lowerCAmelCase_ :Any = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = output.rgb, output.depth
lowerCAmelCase_ :int = 0.4_9_5_5_8_6
lowerCAmelCase_ :Optional[Any] = 0.3_3_7_9_5_5_1_5
lowerCAmelCase_ :int = 1_1_2.4_8_5_1_8
lowerCAmelCase_ :Any = 9_8.4_8_9_7_4_6
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = StableDiffusionLDMaDPipeline.from_pretrained("""Intel/ldm3d-4c""" ).to(__A )
ldmad_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Dict = self.get_inputs(__A )
lowerCAmelCase_ :List[str] = ldmad_pipe(**__A )
lowerCAmelCase_ , lowerCAmelCase_ :Any = output.rgb, output.depth
lowerCAmelCase_ :str = 0.4_1_9_4_1_2_7
lowerCAmelCase_ :List[Any] = 0.3_5_3_7_5_5_8_6
lowerCAmelCase_ :List[Any] = 0.5_6_3_8_5_0_2
lowerCAmelCase_ :List[Any] = 0.3_4_6_8_6_1_0_3
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
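# Every numeric assertion above reduces to one pattern: the max absolute
# difference of two flattened slices must stay under a tolerance. A
# self-contained equivalent (helper name illustrative):
import numpy as np
def _close_enough(actual, expected, atol=1e-2):
    return float(np.abs(np.asarray(actual).flatten() - np.asarray(expected).flatten()).max()) < atol
assert _close_enough([1.0, 2.0], [1.0, 2.005])
assert not _close_enough([1.0, 2.0], [1.0, 2.5])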
| 1 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
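# Worked example of the rolling-hash update used above, with the same base
# (256) and modulus (1_000_003); the helper names are illustrative.
def _hash(s: str, base: int = 256, mod: int = 1_000_003) -> int:
    h = 0
    for ch in s:
        h = (h * base + ord(ch)) % mod
    return h
def _roll(old_hash: int, dropped: str, added: str, power: int, base: int = 256, mod: int = 1_000_003) -> int:
    # drop the leading character, shift one position, append the new character
    return ((old_hash - ord(dropped) * power) * base + ord(added)) % mod
# Sliding a 2-character window from "ab" to "bc" in O(1); power = base**(window - 1).
assert _roll(_hash("ab"), "a", "c", pow(256, 1, 1_000_003)) == _hash("bc")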
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
from statistics import mean
def _snake_case ( lowercase__ : list[int] , lowercase__ : list[int] , lowercase__ : int ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = [0] * no_of_processes
lowerCAmelCase_ :str = [0] * no_of_processes
    # Initialize remaining_time from burst_time.
for i in range(lowercase__ ):
lowerCAmelCase_ :List[Any] = burst_time[i]
lowerCAmelCase_ :list[int] = []
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :List[str] = 0
    # Until every process completes: any process whose arrival time has
    # passed and that still has remaining execution time is put into
    # ready_process, and the shortest job in ready_process (target_process)
    # is executed next.
while completed != no_of_processes:
lowerCAmelCase_ :List[str] = []
lowerCAmelCase_ :Optional[int] = -1
for i in range(lowercase__ ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(lowercase__ )
if len(lowercase__ ) > 0:
lowerCAmelCase_ :Any = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
lowerCAmelCase_ :Any = i
total_time += burst_time[target_process]
completed += 1
lowerCAmelCase_ :Union[str, Any] = 0
lowerCAmelCase_ :Optional[Any] = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def _snake_case ( lowercase__ : list[int] , lowercase__ : int , lowercase__ : list[int] ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ :str = [0] * no_of_processes
for i in range(lowercase__ ):
lowerCAmelCase_ :List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
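# Hand-checked expectation for the demo below: with bursts [2, 5, 3, 7] all
# arriving at time 0, shortest-job-first runs P1, P3, P2, P4, giving waiting
# times [0, 5, 2, 10] and turnaround times [2, 10, 5, 17] (means 4.25 / 8.50).
assert sorted(range(4), key=lambda i: [2, 5, 3, 7][i]) == [0, 2, 1, 3]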
if __name__ == "__main__":
print('[TEST CASE 01]')
__UpperCAmelCase = 4
__UpperCAmelCase = [2, 5, 3, 7]
__UpperCAmelCase = [0, 0, 0, 0]
__UpperCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__UpperCAmelCase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
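# Why pad_to_multiple_of matters in the collator above: tensor cores want
# sequence lengths divisible by 8 (fp16) or 16 (fp8), so a longest sequence of
# e.g. 61 tokens gets padded up to 64.
assert -(-61 // 8) * 8 == 64  # ceil to the next multiple of 8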
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
                # TPUs are currently neither supported nor advised for this, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _snake_case ( lowercase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Dict = filter(lambda lowercase__ : p.requires_grad , model.parameters() )
lowerCAmelCase_ :Any = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__UpperCAmelCase = logging.getLogger(__name__)
def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> int:
'''simple docstring'''
if metric == "rouge2":
lowerCAmelCase_ :Any = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
lowerCAmelCase_ :Tuple = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
lowerCAmelCase_ :Dict = """{val_avg_em:.4f}-{step_count}"""
elif metric == "loss":
lowerCAmelCase_ :Optional[int] = """{val_avg_loss:.4f}-{step_count}"""
else:
raise NotImplementedError(
f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
""" function.""" )
lowerCAmelCase_ :List[Any] = ModelCheckpoint(
dirpath=lowercase__ , filename=lowercase__ , monitor=f"""val_{metric}""" , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
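# Once Lightning fills in the monitored metric, the template above renders a
# filename such as the following (metric value and step are illustrative):
assert "{val_avg_rouge2:.4f}-{step_count}".format(val_avg_rouge2=0.4213, step_count=1500) == "0.4213-1500"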
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return EarlyStopping(
monitor=f"""val_{metric}""" , mode="""min""" if """loss""" in metric else """max""" , patience=lowercase__ , verbose=lowercase__ , )
class _SCREAMING_SNAKE_CASE ( pl.Callback ):
def __lowerCAmelCase ( self , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :Any = {f"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__A )
@rank_zero_only
def __lowerCAmelCase ( self , __A , __A , __A , __A=True ) -> None:
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
lowerCAmelCase_ :Tuple = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
lowerCAmelCase_ :Optional[int] = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCAmelCase_ :int = od / """test_results.txt"""
lowerCAmelCase_ :Any = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCAmelCase_ :int = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
lowerCAmelCase_ :List[str] = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=__A )
generations_file.parent.mkdir(exist_ok=__A )
with open(__A , """a+""" ) as writer:
for key in sorted(__A ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCAmelCase_ :Optional[int] = metrics[key]
if isinstance(__A , torch.Tensor ):
lowerCAmelCase_ :Dict = val.item()
lowerCAmelCase_ :int = f"""{key}: {val:.6f}\n"""
writer.write(__A )
if not save_generations:
return
if "preds" in metrics:
lowerCAmelCase_ :List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(__A )
@rank_zero_only
def __lowerCAmelCase ( self , __A , __A ) -> Union[str, Any]:
try:
lowerCAmelCase_ :Optional[int] = pl_module.model.model.num_parameters()
except AttributeError:
lowerCAmelCase_ :Any = pl_module.model.num_parameters()
lowerCAmelCase_ :int = count_trainable_parameters(__A )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1E6, """grad_mp""": n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCAmelCase ( self , __A , __A ) -> Any:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__A , __A , """test""" )
@rank_zero_only
def __lowerCAmelCase ( self , __A , __A ) -> List[str]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
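# Why the last batch is trimmed above: with e.g. 10 eval samples sharded over
# 4 processes, each process receives ceil(10 / 4) = 3 samples (some of them
# duplicated), so the gathered tensors hold 12 rows and the final 2 must be
# dropped to score exactly len(dataset) samples.
assert 4 * -(-10 // 4) - 10 == 2  # duplicated rows in the gathered batch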
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
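# Self-contained illustration of the dotted-key lookup that find_config_node
# and get_value above implement (function name illustrative):
def _get_value(config: dict, ds_key_long: str, default=None):
    *nodes, key = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(key, default)
assert _get_value({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") == 3
assert _get_value({}, "zero_optimization.stage", default=-1) == -1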
| 1 | 1 |
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best achievable value using items from `index` onward."""
    if index == number_of_items:
        return 0
    # best value if the current item is skipped
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # best value if the current item is taken (only when it still fits)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
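A quick usage sketch for the recursive 0/1 knapsack above: with capacity 5, taking the items of weight 1 and 4 is optimal, for a value of 5 + 8 = 13.
weights = [1, 2, 4, 5]
values = [5, 4, 8, 6]
print(knapsack(weights, values, len(weights), 5, 0))  # 13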
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image , label ):
        return self.pre_processor(text=[label] , images=[image] , padding=True , return_tensors="""pt""" )
    def forward( self , inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self , outputs ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # non-positive logits become background
        array[array > 0] = 1   # positive logits belong to the mask
        return Image.fromarray((array * 255).astype(np.uint8 ) )
| 1 | 1 |
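A hedged end-to-end sketch of what the segmentation tool above does, calling the same checkpoint directly; "cat.png" and the "a cat" label are illustrative inputs.
import numpy as np
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.open("cat.png")
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
array = logits.cpu().detach().numpy()
array[array <= 0] = 0  # same thresholding as the tool's decode step
array[array > 0] = 1
mask = Image.fromarray((array * 255).astype(np.uint8))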
"""simple docstring"""
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
    def __init__( self , config_file_or_dict ):
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , """r""" , encoding="""utf-8""" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("""utf-8""" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self ):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("""zero_optimization.stage""" , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["""cpu""", """nvme"""] )
            offload_devices = set(
                [
                    self.get_value("""zero_optimization.offload_optimizer.device""" ),
                    self.get_value("""zero_optimization.offload_param.device""" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self , ds_key_long ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self , ds_key_long , default=None ):
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
    def del_config_sub_tree( self , ds_key_long , must_exist=False ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(""".""" )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self , ds_key_long ) -> bool:
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false( self , ds_key_long ) -> bool:
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2( self ) -> bool:
        return self._stage == 2
    def is_zero3( self ) -> bool:
        return self._stage == 3
    def is_offload( self ) -> bool:
        return self._offload
class _SCREAMING_SNAKE_CASE :
    def __init__( self , engine ):
        self.engine = engine
    def backward( self , loss , **kwargs ):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
    def __init__( self , optimizer ):
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , """overflow""" )
    def zero_grad( self , set_to_none=None ):
        pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step( self ):
        pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def is_overflow( self ) -> bool:
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class _SCREAMING_SNAKE_CASE ( A__ ):
    def __init__( self , scheduler , optimizers ):
        super().__init__(scheduler , optimizers )
    def step( self ):
        pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
    def __init__( self , params , lr=0.0_0_1 , weight_decay=0 , **kwargs ):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class _SCREAMING_SNAKE_CASE :
    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 1 |
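The dotted-key lookup that find_config_node and get_value implement above can be sketched standalone on a plain dict; the helper name and sample config below are illustrative only.
def get_nested_value(config: dict, ds_key_long: str, default=None):
    nodes = ds_key_long.split(".")
    leaf = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(leaf, default)

ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_nested_value(ds_config, "zero_optimization.stage", -1) == 3
assert get_nested_value(ds_config, "zero_optimization.offload_optimizer.device") is None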
"""simple docstring"""
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best achievable value using items from `index` onward."""
    if index == number_of_items:
        return 0
    # best value if the current item is skipped
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # best value if the current item is taken (only when it still fits)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1)
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__UpperCAmelCase = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'facebook/bart-base': 10_24,
'facebook/bart-large': 10_24,
'facebook/bart-large-mnli': 10_24,
'facebook/bart-large-cnn': 10_24,
'facebook/bart-large-xsum': 10_24,
'yjernite/bart_eli5': 10_24,
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase_ :List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :Tuple = ["input_ids", "attention_mask"]
UpperCAmelCase_ :Optional[Any] = BartTokenizer
def __init__( self , __A=None , __A=None , __A=None , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , __A=True , **__A , ) -> Optional[int]:
super().__init__(
__A , __A , tokenizer_file=__A , errors=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , trim_offsets=__A , **__A , )
lowerCAmelCase_ :int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
lowerCAmelCase_ :Tuple = getattr(__A , pre_tok_state.pop("""type""" ) )
lowerCAmelCase_ :int = add_prefix_space
lowerCAmelCase_ :Any = pre_tok_class(**__A )
lowerCAmelCase_ :Tuple = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase_ :Dict = """post_processor"""
lowerCAmelCase_ :str = getattr(self.backend_tokenizer , __A , __A )
if tokenizer_component_instance:
lowerCAmelCase_ :int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ :Dict = tuple(state["""sep"""] )
if "cls" in state:
lowerCAmelCase_ :Tuple = tuple(state["""cls"""] )
lowerCAmelCase_ :Dict = False
if state.get("""add_prefix_space""" , __A ) != add_prefix_space:
lowerCAmelCase_ :Any = add_prefix_space
lowerCAmelCase_ :List[Any] = True
if state.get("""trim_offsets""" , __A ) != trim_offsets:
lowerCAmelCase_ :Optional[Any] = trim_offsets
lowerCAmelCase_ :Optional[int] = True
if changes_to_apply:
lowerCAmelCase_ :Tuple = getattr(__A , state.pop("""type""" ) )
lowerCAmelCase_ :int = component_class(**__A )
setattr(self.backend_tokenizer , __A , __A )
@property
def __lowerCAmelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else value
lowerCAmelCase_ :str = value
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :int = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :Dict = kwargs.get("""is_split_into_words""" , __A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
lowerCAmelCase_ :Optional[Any] = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def __lowerCAmelCase ( self , __A , __A=None ) -> Any:
lowerCAmelCase_ :Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :Dict = [self.sep_token_id]
lowerCAmelCase_ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 1 |
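A hedged usage sketch for the fast BART tokenizer above, assuming the facebook/bart-base checkpoint: a single sequence is wrapped as <s> ... </s> (ids 0 and 2), matching the special-token builder at the end of the class.
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
ids = tok("Hello world")["input_ids"]
print(ids[0], ids[-1])  # 0 2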
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrap `tqdm.auto.tqdm`, rendering the bar only on the local main process by default."""
    if not is_tqdm_available():
        raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 1 | 1 |
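Usage sketch for the wrapper above: the first positional argument is main_process_only, and everything else is forwarded to tqdm.auto.tqdm, so the bar only renders where PartialState().local_process_index == 0.
for _ in tqdm(True, range(1_000)):
    pass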
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 |
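The checkpoint-name parsing buried in the training function above can be lifted into a small standalone helper; next_epoch is an illustrative name.
def next_epoch(resume_from_checkpoint: str) -> int:
    """Recover the epoch number from a folder such as 'outputs/epoch_12' and return the next one."""
    epoch_string = resume_from_checkpoint.split("epoch_")[1]
    digits = ""
    for char in epoch_string:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits) + 1

assert next_epoch("outputs/epoch_12") == 13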
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 1 |
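A hedged standalone sketch of the register-and-round-trip pattern the tests above exercise; "my-model" and MyConfig are made-up names.
import tempfile
from transformers import AutoConfig, PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "my-model"

AutoConfig.register("my-model", MyConfig)
config = MyConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    reloaded = AutoConfig.from_pretrained(tmp_dir)
assert isinstance(reloaded, MyConfig)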
"""simple docstring"""
from __future__ import annotations
class _SCREAMING_SNAKE_CASE :
    def __init__( self , rows ):
        error = TypeError(
            """Matrices must be formed from a list of zero or more lists containing at """
            """least one and the same number of values, each of which must be of type """
            """int or float.""" )
        if len(rows ) != 0:
            cols = len(rows[0] )
            if cols == 0:
                raise error
            for row in rows:
                if len(row ) != cols:
                    raise error
                for value in row:
                    if not isinstance(value , (int, float) ):
                        raise error
            self.rows = rows
        else:
            self.rows = []
def __lowerCAmelCase ( self ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def __lowerCAmelCase ( self ) -> int:
return len(self.rows )
@property
def __lowerCAmelCase ( self ) -> int:
return len(self.rows[0] )
@property
def __lowerCAmelCase ( self ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def __lowerCAmelCase ( self ) -> bool:
return self.order[0] == self.order[1]
def __lowerCAmelCase ( self ) -> Matrix:
lowerCAmelCase_ :str = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(__A )
def __lowerCAmelCase ( self ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def __lowerCAmelCase ( self ) -> bool:
return bool(self.determinant() )
def __lowerCAmelCase ( self , __A , __A ) -> int:
lowerCAmelCase_ :Optional[Any] = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(__A ).determinant()
def __lowerCAmelCase ( self , __A , __A ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(__A , __A )
return -1 * self.get_minor(__A , __A )
def __lowerCAmelCase ( self ) -> Matrix:
return Matrix(
[
[self.get_minor(__A , __A ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def __lowerCAmelCase ( self ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def __lowerCAmelCase ( self ) -> Matrix:
lowerCAmelCase_ :Tuple = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(__A )
def __lowerCAmelCase ( self ) -> Matrix:
lowerCAmelCase_ :Tuple = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
return str(self.rows )
def __str__( self ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(__A ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def __lowerCAmelCase ( self , __A , __A = None ) -> None:
lowerCAmelCase_ :int = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(__A , __A ):
raise type_error
for value in row:
if not isinstance(__A , (int, float) ):
raise type_error
if len(__A ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(__A )
else:
lowerCAmelCase_ :List[str] = self.rows[0:position] + [row] + self.rows[position:]
def __lowerCAmelCase ( self , __A , __A = None ) -> None:
lowerCAmelCase_ :List[str] = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(__A , __A ):
raise type_error
for value in column:
if not isinstance(__A , (int, float) ):
raise type_error
if len(__A ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
lowerCAmelCase_ :Any = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
lowerCAmelCase_ :Optional[Any] = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self , __A ) -> bool:
if not isinstance(__A , __A ):
return NotImplemented
return self.rows == other.rows
def __ne__( self , __A ) -> bool:
return not self == other
def __neg__( self ) -> Matrix:
return self * -1
def __add__( self , __A ) -> Matrix:
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self , __A ) -> Matrix:
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self , __A ) -> Matrix:
if isinstance(__A , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(__A , __A ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(__A , __A ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self , __A ) -> Matrix:
if not isinstance(__A , __A ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
lowerCAmelCase_ :Optional[Any] = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def __lowerCAmelCase ( cls , __A , __A ) -> int:
return sum(row[i] * column[i] for i in range(len(__A ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 |
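A hedged usage sketch for the class above. It assumes the methods and properties keep their conventional upstream names (rows, order, num_rows, and so on), which the operator overloads below rely on internally.
m = Matrix([[1, 2], [3, 4]])
assert (m + m) == m * 2   # elementwise addition vs. scalar multiplication
assert -m == m * -1
assert m != Matrix([[1, 2], [3, 5]])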
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 1 |
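A hedged sketch of the prefix/input split the tests above check, assuming the Tanrei/GPTSAN-japanese checkpoint: token_type_ids mark the prefix segment with 1 and the input segment with 0.
from transformers import GPTSanJapaneseTokenizer

tok = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
enc = tok("こんばんは、世界。", prefix_text="こんにちは、世界。")
print(enc.token_type_ids)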
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ :str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__A )
lowerCAmelCase_ :Tuple = -1
lowerCAmelCase_ :Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__A )
lowerCAmelCase_ :Any = model.generate(__A , max_new_tokens=10 , do_sample=__A )
lowerCAmelCase_ :List[Any] = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase_ :int = TextStreamer(__A )
model.generate(__A , max_new_tokens=10 , do_sample=__A , streamer=__A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase_ :Optional[int] = cs.out[:-1]
self.assertEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ :Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__A )
lowerCAmelCase_ :Optional[Any] = -1
lowerCAmelCase_ :Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__A )
lowerCAmelCase_ :List[str] = model.generate(__A , max_new_tokens=10 , do_sample=__A )
lowerCAmelCase_ :List[str] = tokenizer.decode(greedy_ids[0] )
lowerCAmelCase_ :Dict = TextIteratorStreamer(__A )
lowerCAmelCase_ :Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase_ :int = Thread(target=model.generate , kwargs=__A )
thread.start()
lowerCAmelCase_ :Dict = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__A , __A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ :Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__A )
lowerCAmelCase_ :List[Any] = -1
lowerCAmelCase_ :str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__A )
lowerCAmelCase_ :Union[str, Any] = model.generate(__A , max_new_tokens=10 , do_sample=__A )
lowerCAmelCase_ :str = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase_ :List[Any] = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase_ :Dict = TextStreamer(__A , skip_prompt=__A )
model.generate(__A , max_new_tokens=10 , do_sample=__A , streamer=__A )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase_ :Any = cs.out[:-1]
self.assertEqual(__A , __A )
def __lowerCAmelCase ( self ) -> int:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase_ :Union[str, Any] = AutoTokenizer.from_pretrained("""distilgpt2""" )
lowerCAmelCase_ :Tuple = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(__A )
lowerCAmelCase_ :int = -1
lowerCAmelCase_ :Any = torch.ones((1, 5) , device=__A ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase_ :Optional[int] = TextStreamer(__A , skip_special_tokens=__A )
model.generate(__A , max_new_tokens=1 , do_sample=__A , streamer=__A )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase_ :Dict = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase_ :Any = tokenizer(__A , return_tensors="""pt""" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
lowerCAmelCase_ :List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(__A )
lowerCAmelCase_ :List[str] = -1
lowerCAmelCase_ :str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(__A )
lowerCAmelCase_ :str = TextIteratorStreamer(__A , timeout=0.0_0_1 )
lowerCAmelCase_ :int = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
lowerCAmelCase_ :Optional[int] = Thread(target=model.generate , kwargs=__A )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__A ):
lowerCAmelCase_ :Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
| 1 |
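A minimal sketch of streaming generation with the APIs tested above; distilgpt2 is used only because the tests above already reference it.
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tok = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
inputs = tok("An increasing sequence: one,", return_tensors="pt")
streamer = TextStreamer(tok, skip_prompt=True)
model.generate(**inputs, max_new_tokens=20, do_sample=False, streamer=streamer)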
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Visualize the raw data against the degree-4 polynomial fit."""
    plt.scatter(X , y , color="""red""" )
    plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
    plt.title("""Truth or Bluff (Linear Regression)""" )
    plt.xlabel("""Position level""" )
    plt.ylabel("""Salary""" )
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 1 |
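A quick standalone check of what the degree-4 feature expansion above produces: one input column becomes five (including the bias term).
import numpy as np
from sklearn.preprocessing import PolynomialFeatures

print(PolynomialFeatures(degree=4).fit_transform(np.array([[5.5]])))
# [[  1.        5.5      30.25    166.375   915.0625]]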
"""simple docstring"""
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort `sequence[start:end + 1]` in place with the (deliberately slow) slowsort scheme."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 1 |
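Usage sketch for the in-place slowsort above.
data = [5, 2, 9, 1, 7]
slowsort(data)
print(data)  # [1, 2, 5, 7, 9]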
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
    """Solve sigma = n * e * mu for whichever of the three quantities is passed as zero."""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
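Usage sketch for the function above: pass exactly one of the three quantities as zero and the missing one is solved from sigma = n * e * mu (result shown approximately).
print(electric_conductivity(conductivity=25, electron_conc=100, mobility=0))
# ('mobility', ~1.56e+18)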
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__UpperCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key( state_dict , old , new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
return new_state_dict
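# Illustrative example of the mapping above (the concrete key name is hypothetical,
# but follows the substitution performed in the loop):
#   "backbone.0.body.layer1.0.conv1.weight"
#       -> "backbone.conv_encoder.model.layer1.0.conv1.weight"
# Keys without the "backbone.0.body" prefix are copied through unchanged.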
def read_in_q_k_v( state_dict , is_panoptic=False ):
    '''simple docstring'''
    prefix = """"""
    if is_panoptic:
        prefix = """conditional_detr."""
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        in_proj_bias = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.weight"""] = in_proj_weight[:256, :]
        state_dict[f"""encoder.layers.{i}.self_attn.q_proj.bias"""] = in_proj_bias[:256]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.weight"""] = in_proj_weight[256:512, :]
        state_dict[f"""encoder.layers.{i}.self_attn.k_proj.bias"""] = in_proj_bias[256:512]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.weight"""] = in_proj_weight[-256:, :]
        state_dict[f"""encoder.layers.{i}.self_attn.v_proj.bias"""] = in_proj_bias[-256:]
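# A minimal sketch of the split above (shapes assumed from the hidden size of 256):
# PyTorch's nn.MultiheadAttention stacks the query/key/value projections into a single
# in_proj_weight of shape (3 * 256, 256), so the three row slices recover them, e.g.
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj_weight[:256, :], in_proj_weight[256:512, :], in_proj_weight[-256:, :]
#   assert q_w.shape == k_w.shape == v_w.shape == (256, 256)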
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name , pytorch_dump_folder_path ):
'''simple docstring'''
lowerCAmelCase_ :Tuple = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCAmelCase_ :Dict = """resnet101"""
if "dc5" in model_name:
lowerCAmelCase_ :Union[str, Any] = True
lowerCAmelCase_ :int = """panoptic""" in model_name
if is_panoptic:
lowerCAmelCase_ :int = 2_5_0
else:
lowerCAmelCase_ :Any = 9_1
lowerCAmelCase_ :Any = """huggingface/label-files"""
lowerCAmelCase_ :Any = """coco-detection-id2label.json"""
lowerCAmelCase_ :Optional[Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
    lowerCAmelCase_ :List[Any] = {int(k ): v for k, v in idalabel.items()}  # JSON keys arrive as strings
lowerCAmelCase_ :Optional[Any] = idalabel
lowerCAmelCase_ :str = {v: k for k, v in idalabel.items()}
# load image processor
lowerCAmelCase_ :List[Any] = """coco_panoptic""" if is_panoptic else """coco_detection"""
lowerCAmelCase_ :Optional[int] = ConditionalDetrImageProcessor(format=lowercase__ )
# prepare image
lowerCAmelCase_ :Dict = prepare_img()
lowerCAmelCase_ :Union[str, Any] = image_processor(images=lowercase__ , return_tensors="""pt""" )
lowerCAmelCase_ :List[str] = encoding["""pixel_values"""]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
    conditional_detr = torch.hub.load("""DeppMeng/ConditionalDETR""" , model_name , pretrained=True ).eval()
    state_dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCAmelCase_ :List[Any] = """conditional_detr.""" + src
rename_key(lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :List[str] = rename_backbone_keys(lowercase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase__ , is_panoptic=lowercase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCAmelCase_ :List[str] = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
lowerCAmelCase_ :List[str] = state_dict.pop(lowercase__ )
lowerCAmelCase_ :Any = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCAmelCase_ :Dict = state_dict.pop(lowercase__ )
lowerCAmelCase_ :Optional[int] = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
lowerCAmelCase_ :Optional[Any] = state_dict.pop(lowercase__ )
lowerCAmelCase_ :Optional[int] = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCAmelCase_ :str = state_dict.pop(lowercase__ )
lowerCAmelCase_ :Tuple = val
# finally, create HuggingFace model and load state dict
lowerCAmelCase_ :Optional[int] = ConditionalDetrForSegmentation(lowercase__ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
model.push_to_hub(repo_id=lowercase__ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
lowerCAmelCase_ :int = conditional_detr(lowercase__ )
lowerCAmelCase_ :str = model(lowercase__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase_ :List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase_ :Union[str, Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
lowerCAmelCase_ :str = item.ha.text
lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""]
lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
lowerCAmelCase_ :int = """Not available"""
try:
lowerCAmelCase_ :str = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
lowerCAmelCase_ :Optional[Any] = """"""
try:
lowerCAmelCase_ :str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_0_0 )
except ValueError:
lowerCAmelCase_ :Union[str, Any] = float("""nan""" )
except AttributeError:
pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = """ """
        product_mrp = """ """
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
product = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__( self , data ) -> None:
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    '''simple docstring'''
    print("""\n********Press N to stop entering at any point of time********\n""" )
    check = input("""Enter the value of the root node: """ ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise  # unreachable: every iteration either returns or enqueues both children
def pre_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=""",""" )
    pre_order(node.left )
    pre_order(node.right )
def in_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=""",""" )
    in_order(node.right )
def post_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=""",""" )
def level_order( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=""",""" )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=""",""" )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=""",""" )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=""",""" )
        n = n.right
def post_order_iter( node : TreeNode ) -> None:
    '''simple docstring'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1: list[TreeNode] = []
    stack2: list[TreeNode] = []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=""",""" )
def _snake_case ( lowercase__ : str = "" , lowercase__ : Union[str, Any]=5_0 , lowercase__ : int="*" ) -> str:
'''simple docstring'''
if not s:
return "\n" + width * char
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = divmod(width - len(lowercase__ ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
node = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort( collection : list , n : int ) -> None:
    '''simple docstring'''
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1 )
    rec_insertion_sort(collection , n - 1 )
def insert_next( collection : list , index : int ) -> None:
    '''simple docstring'''
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1 )
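# Worked trace (example values assumed): rec_insertion_sort([3, 1, 2], 3) first runs
# insert_next at index 2 (1 <= 2, so nothing moves), then recurses on the prefix,
# where insert_next swaps 3 and 1 and bubbles 3 past 2, leaving [1, 2, 3].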
if __name__ == "__main__":
numbers = input('Enter integers separated by spaces: ')
number_list = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 1 |
"""simple docstring"""
import os
from math import log10
def solution( data_file : str = "base_exp.txt" ) -> int:
    '''simple docstring'''
    largest : float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
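# Why logarithms: building a**b directly would create enormous integers, but log10 is
# monotonic, so a1**b1 > a2**b2 exactly when b1 * log10(a1) > b2 * log10(a2), reducing
# each comparison to a single float multiply. For example, 3**7 > 2**11 because
# 7 * log10(3) ~ 3.34 exceeds 11 * log10(2) ~ 3.31.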
if __name__ == "__main__":
print(solution())
| 1 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
"""simple docstring"""
import itertools
import math
def is_prime( number : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
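# Why stepping by 6 works: residues 0, 2, 3 and 4 (mod 6) are divisible by 2 or 3,
# so after the early checks only candidates of the form 6k +/- 1 survive; the loop
# therefore tests i and i + 2 for i = 5, 11, 17, ... up to sqrt(n). For n = 97,
# sqrt(97) is about 9.8, so only i = 5 is tried (trial divisions by 5 and 7).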
def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( nth : int = 1_0_0_0_1 ) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 1 |
"""simple docstring"""
alphabet_size = 2_56
# Modulus to hash a string
modulus = 1_00_00_03
def rabin_karp( pattern : str , text : str ) -> bool:
    '''simple docstring'''
    p_len = len(pattern )
    t_len = len(text )
    if p_len > t_len:
        return False
    p_hash = 0
    text_hash = 0
    modulus_power = 1
    # Compute the hash of the pattern and of the first window of the text
    for i in range(p_len ):
        p_hash = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i] ) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus
    for i in range(0 , t_len - p_len + 1 ):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Roll the hash forward by one character (see https://en.wikipedia.org/wiki/Rolling_hash)
        text_hash = (
            (text_hash - ord(text[i] ) * modulus_power) * alphabet_size
            + ord(text[i + p_len] )
        ) % modulus
return False
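# The window update above, written out (a minimal sketch with p = alphabet_size,
# q = modulus, m = p_len):
#   H(text[i+1 : i+1+m]) = ((H(text[i : i+m]) - ord(text[i]) * p**(m-1)) * p
#                           + ord(text[i+m])) % q
# where modulus_power holds p**(m-1) % q, so shifting the window costs O(1)
# instead of rehashing m characters.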
def test_rabin_karp() -> None:
    '''simple docstring'''
    # Test 1)
    pattern = """abc1abc12"""
    text1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text2 = """alskfjaldsk23adsfabcabc"""
    assert rabin_karp(pattern , text1 ) and not rabin_karp(pattern , text2 )
    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert rabin_karp(pattern , text )
    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert rabin_karp(pattern , text )
    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert rabin_karp(pattern , text )
    # Test 5)
    pattern = """Lü"""
    text = """Lüsai"""
    assert rabin_karp(pattern , text )
    pattern = """Lue"""
    assert not rabin_karp(pattern , text )
    print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 1 |
"""simple docstring"""
def solution( length : int = 5_0 ) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
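# The recurrence above mirrors Project Euler problem 114: ways_number[n] counts the
# fillings of a row of n cells with red blocks of length >= 3 separated by at least
# one empty cell. The inner loops place the last block (of block_length, shifted by
# block_start from the right edge) times an independent prefix of
# row_length - block_start - block_length - 1 cells; the trailing "+= 1" counts the
# block sitting flush at the left edge with the rest empty. Hand check (value from
# the problem statement): solution(7) == 17.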
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
solution = []
def is_safe( board : list[list[int]] , row : int , column : int ) -> bool:
    '''simple docstring'''
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
return True
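# Only the row, the column and the two upper diagonals are scanned above: solve()
# fills the board top-down, one row per recursive call, so no queen can sit below
# the current row yet. Small illustration (4x4 board assumed): placing at (2, 1)
# checks row 2, column 1, then (1, 0) on the upper-left diagonal and (1, 2), (0, 3)
# on the upper-right diagonal.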
def solve( board : list[list[int]] , row : int ) -> bool:
    '''simple docstring'''
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard( board : list[list[int]] ) -> None:
    '''simple docstring'''
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print("""Q""" , end=""" """ )
            else:
                print(""".""" , end=""" """ )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('The total no. of solutions is:', len(solution))
| 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Tuple = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase_ :List[Any] = Image.fromarray(np.uint8(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
    def init_weights(m ):
        if isinstance(m , torch.nn.Conv2d ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Optional[Any] = 2
lowerCAmelCase_ :Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCAmelCase_ :List[Any] = Image.fromarray(np.uint8(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 1 | 1 |
"""simple docstring"""
def one_pence() -> int:
    '''simple docstring'''
    return 1
def two_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else ten_pence(x - 1_0 ) + five_pence(x )
def twenty_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else twenty_pence(x - 2_0 ) + ten_pence(x )
def fifty_pence( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else fifty_pence(x - 5_0 ) + twenty_pence(x )
def one_pound( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else one_pound(x - 1_0_0 ) + fifty_pence(x )
def two_pound( x : int ) -> int:
    '''simple docstring'''
    return 0 if x < 0 else two_pound(x - 2_0_0 ) + one_pound(x )
def solution( x : int = 2_0_0 ) -> int:
    '''simple docstring'''
    return two_pound(x )
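# The chain above is a hand-rolled coin-change count over the UK denominations
# (1p, 2p, 5p, 10p, 20p, 50p, 100p, 200p): each helper sums the count after spending
# one of its own coin with the count delegated to the next-smaller denomination,
# bottoming out at one_pence(). For the default target of 200 pence this counts
# every way to make two pounds, 73682 ways (the widely cited answer to Project
# Euler problem 31).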
if __name__ == "__main__":
print(solution(int(input().strip())))
| 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
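                # Length normalisation (design note, not an original comment): raw cumulative
                # log-probabilities favour short beams, so averaging by seq_lengths before the
                # top-k keeps longer continuations competitive; beams flagged in is_stopped
                # only ever add a zero score, which freezes their totals.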
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 1 | 1 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=A__ )
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :str
UpperCAmelCase_ :str
UpperCAmelCase_ :Optional[str] = None
UpperCAmelCase_ :Optional[str] = None
UpperCAmelCase_ :Optional[str] = None
@dataclass(frozen=A__ )
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :List[int]
UpperCAmelCase_ :Optional[List[int]] = None
UpperCAmelCase_ :Optional[List[int]] = None
UpperCAmelCase_ :Optional[Union[int, float]] = None
UpperCAmelCase_ :Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[InputFeatures]
def __init__( self , __A , __A , __A , __A = None , __A=False , __A = False , ) -> Tuple:
lowerCAmelCase_ :Tuple = hans_processors[task]()
lowerCAmelCase_ :Dict = os.path.join(
__A , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(__A ) , __A , ) , )
lowerCAmelCase_ :List[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase_ , lowerCAmelCase_ :Any = label_list[2], label_list[1]
lowerCAmelCase_ :Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase_ :int = cached_features_file + """.lock"""
with FileLock(__A ):
if os.path.exists(__A ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
lowerCAmelCase_ :Optional[int] = torch.load(__A )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
lowerCAmelCase_ :Union[str, Any] = (
processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A )
)
logger.info("""Training examples: %s""" , len(__A ) )
lowerCAmelCase_ :List[str] = hans_convert_examples_to_features(__A , __A , __A , __A )
logger.info("""Saving features into cached file %s""" , __A )
torch.save(self.features , __A )
def __len__( self ) -> List[str]:
return len(self.features )
def __getitem__( self , __A ) -> InputFeatures:
return self.features[i]
def __lowerCAmelCase ( self ) -> Optional[Any]:
return self.label_list
if is_tf_available():
import tensorflow as tf
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :List[InputFeatures]
def __init__( self , __A , __A , __A , __A = 128 , __A=False , __A = False , ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = hans_processors[task]()
lowerCAmelCase_ :int = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = label_list[2], label_list[1]
lowerCAmelCase_ :List[str] = label_list
lowerCAmelCase_ :Optional[int] = processor.get_dev_examples(__A ) if evaluate else processor.get_train_examples(__A )
lowerCAmelCase_ :List[str] = hans_convert_examples_to_features(__A , __A , __A , __A )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(__A )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowerCAmelCase_ :Union[str, Any] = tf.data.Dataset.from_generator(
__A , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def __lowerCAmelCase ( self ) -> int:
return self.dataset
def __len__( self ) -> List[str]:
return len(self.features )
def __getitem__( self , __A ) -> InputFeatures:
        return self.features[__A]
def __lowerCAmelCase ( self ) -> int:
return self.label_list
class _SCREAMING_SNAKE_CASE ( A__ ):
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
return self._create_examples(self._read_tsv(os.path.join(__A , """heuristics_train_set.txt""" ) ) , """train""" )
def __lowerCAmelCase ( self , __A ) -> str:
return self._create_examples(self._read_tsv(os.path.join(__A , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return ["contradiction", "entailment", "neutral"]
def __lowerCAmelCase ( self , __A , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = []
for i, line in enumerate(__A ):
if i == 0:
continue
lowerCAmelCase_ :Union[str, Any] = """%s-%s""" % (set_type, line[0])
lowerCAmelCase_ :Dict = line[5]
lowerCAmelCase_ :str = line[6]
lowerCAmelCase_ :Union[str, Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
lowerCAmelCase_ :Dict = line[0]
examples.append(InputExample(guid=__A , text_a=__A , text_b=__A , label=__A , pairID=__A ) )
return examples
def _snake_case ( lowercase__ : List[InputExample] , lowercase__ : List[str] , lowercase__ : int , lowercase__ : PreTrainedTokenizer , ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = {label: i for i, label in enumerate(lowercase__ )}
lowerCAmelCase_ :List[Any] = []
for ex_index, example in tqdm.tqdm(enumerate(lowercase__ ) , desc="""convert examples to features""" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d""" % (ex_index) )
lowerCAmelCase_ :Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowercase__ , max_length=lowercase__ , padding="""max_length""" , truncation=lowercase__ , return_overflowing_tokens=lowercase__ , )
lowerCAmelCase_ :List[str] = label_map[example.label] if example.label in label_map else 0
lowerCAmelCase_ :Optional[int] = int(example.pairID )
features.append(InputFeatures(**lowercase__ , label=lowercase__ , pairID=lowercase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__UpperCAmelCase = {
'hans': 3,
}
__UpperCAmelCase = {
'hans': HansProcessor,
}
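# A minimal, dependency-free sketch (an added illustration, not part of the original
# file) of the label-mapping step inside `hans_convert_examples_to_features`: labels
# are indexed through a dict built from the (possibly reordered) label list, and any
# unknown label falls back to index 0. The example data below is hypothetical.
label_list = ["contradiction", "entailment", "neutral"]
label_map = {label: i for i, label in enumerate(label_list)}
examples = [("entailment", "p1"), ("neutral", "p2"), ("unknown", "p3")]
features = [
    (label_map[label] if label in label_map else 0, pair_id)
    for label, pair_id in examples
]
assert features == [(1, "p1"), (2, "p2"), (0, "p3")]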
| 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
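# A minimal sketch (added illustration) of the `attribute_map` pattern used by the
# config above: generic names such as `hidden_size` are resolved to model-specific
# fields (`d_model`) via properties, so downstream code can stay model-agnostic.
# This toy class only illustrates the mechanism; it is not the real
# `PretrainedConfig` API.
class ToyDetrConfig:
    def __init__(self, d_model=256, encoder_attention_heads=8):
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
    @property
    def hidden_size(self):
        return self.d_model
    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads
cfg = ToyDetrConfig()
assert cfg.hidden_size == 256 and cfg.num_attention_heads == 8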
| 1 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCAmelCase = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['MaskFormerFeatureExtractor']
__UpperCAmelCase = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__UpperCAmelCase = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
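# A minimal sketch (added illustration) of the optional-dependency guard pattern used
# above: a probe raises a sentinel exception when a backend is missing, and the
# except branch simply skips registering the backend-specific symbols. The
# availability check is mimicked here with `importlib.util.find_spec`; the symbol
# names are hypothetical.
import importlib.util
class OptionalDependencyNotAvailable(Exception):
    pass
def backend_available(name):
    return importlib.util.find_spec(name) is not None
symbols = ["SomeConfig"]  # always importable
try:
    if not backend_available("torch"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    symbols.extend(["SomeModel", "SomePreTrainedModel"])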
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
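# A minimal sketch (added illustration) of the lazy-import idea behind `_LazyModule`:
# submodules are imported only when an attribute is first requested, via module-level
# `__getattr__` (PEP 562). This shows the mechanism only; it is not the actual
# `_LazyModule` implementation, and the mapping below is hypothetical.
import importlib
_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}
def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")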
| 1 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__UpperCAmelCase = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 10_00,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCAmelCase = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 10_00,
'block_out_channels': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCAmelCase = {
'sample_size': 2_56,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__UpperCAmelCase = {
'num_train_timesteps': 40,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
__UpperCAmelCase = {
'num_train_timesteps': 2_01,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
__UpperCAmelCase = {
'num_train_timesteps': 1_51,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
def _snake_case ( lowercase__ : Tuple ) -> Tuple:
'''simple docstring'''
if isinstance(lowercase__ , lowercase__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : str , lowercase__ : Any , lowercase__ : List[str]=False ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
lowerCAmelCase_ :Dict = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
lowerCAmelCase_ :Dict = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
lowerCAmelCase_ :int = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
lowerCAmelCase_ :Optional[int] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
lowerCAmelCase_ :List[str] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
lowerCAmelCase_ :List[str] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
lowerCAmelCase_ :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
lowerCAmelCase_ :List[str] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
lowerCAmelCase_ :int = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
if has_skip:
lowerCAmelCase_ :Optional[int] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
lowerCAmelCase_ :str = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
return new_checkpoint
def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : Dict=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 )
lowerCAmelCase_ :Optional[int] = checkpoint[f"""{old_prefix}.norm.weight"""]
lowerCAmelCase_ :str = checkpoint[f"""{old_prefix}.norm.bias"""]
lowerCAmelCase_ :Any = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Optional[int] = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :List[Any] = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Optional[int] = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Tuple = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCAmelCase_ :Optional[Any] = (
checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 )
)
lowerCAmelCase_ :Union[str, Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _snake_case ( lowercase__ : str , lowercase__ : List[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = torch.load(lowercase__ , map_location="""cpu""" )
lowerCAmelCase_ :Optional[Any] = {}
lowerCAmelCase_ :List[Any] = checkpoint["""time_embed.0.weight"""]
lowerCAmelCase_ :List[str] = checkpoint["""time_embed.0.bias"""]
lowerCAmelCase_ :List[str] = checkpoint["""time_embed.2.weight"""]
lowerCAmelCase_ :Any = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
lowerCAmelCase_ :Union[str, Any] = checkpoint["""label_emb.weight"""]
lowerCAmelCase_ :Dict = checkpoint["""input_blocks.0.0.weight"""]
lowerCAmelCase_ :Optional[int] = checkpoint["""input_blocks.0.0.bias"""]
lowerCAmelCase_ :Any = unet_config["""down_block_types"""]
lowerCAmelCase_ :Any = unet_config["""layers_per_block"""]
lowerCAmelCase_ :Optional[Any] = unet_config["""attention_head_dim"""]
lowerCAmelCase_ :str = unet_config["""block_out_channels"""]
lowerCAmelCase_ :Union[str, Any] = 1
lowerCAmelCase_ :Tuple = channels_list[0]
for i, layer_type in enumerate(lowercase__ ):
lowerCAmelCase_ :List[str] = channels_list[i]
lowerCAmelCase_ :Optional[int] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowercase__ ):
lowerCAmelCase_ :List[Any] = f"""down_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :int = f"""input_blocks.{current_layer}.0"""
lowerCAmelCase_ :Any = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ :Optional[int] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowercase__ ):
lowerCAmelCase_ :List[Any] = f"""down_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :List[str] = f"""input_blocks.{current_layer}.0"""
lowerCAmelCase_ :Dict = True if j == 0 and downsample_block_has_skip else False
lowerCAmelCase_ :Any = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
lowerCAmelCase_ :List[Any] = f"""down_blocks.{i}.attentions.{j}"""
lowerCAmelCase_ :Tuple = f"""input_blocks.{current_layer}.1"""
lowerCAmelCase_ :Dict = convert_attention(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
lowerCAmelCase_ :Tuple = f"""down_blocks.{i}.downsamplers.0"""
lowerCAmelCase_ :List[Any] = f"""input_blocks.{current_layer}.0"""
lowerCAmelCase_ :List[str] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
lowerCAmelCase_ :Any = current_channels
# hardcoded the mid-block for now
lowerCAmelCase_ :int = """mid_block.resnets.0"""
lowerCAmelCase_ :int = """middle_block.0"""
lowerCAmelCase_ :Union[str, Any] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :List[str] = """mid_block.attentions.0"""
lowerCAmelCase_ :Tuple = """middle_block.1"""
lowerCAmelCase_ :List[Any] = convert_attention(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """mid_block.resnets.1"""
lowerCAmelCase_ :str = """middle_block.2"""
lowerCAmelCase_ :List[str] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[Any] = 0
lowerCAmelCase_ :Union[str, Any] = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowercase__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ :Any = f"""up_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :Optional[Any] = f"""output_blocks.{current_layer}.0"""
lowerCAmelCase_ :Union[str, Any] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
lowerCAmelCase_ :Any = f"""up_blocks.{i}.upsamplers.0"""
lowerCAmelCase_ :Optional[int] = f"""output_blocks.{current_layer-1}.1"""
lowerCAmelCase_ :Tuple = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCAmelCase_ :Optional[int] = f"""up_blocks.{i}.resnets.{j}"""
lowerCAmelCase_ :Optional[int] = f"""output_blocks.{current_layer}.0"""
lowerCAmelCase_ :Any = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ , has_skip=lowercase__ )
lowerCAmelCase_ :int = f"""up_blocks.{i}.attentions.{j}"""
lowerCAmelCase_ :Optional[Any] = f"""output_blocks.{current_layer}.1"""
lowerCAmelCase_ :int = convert_attention(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
current_layer += 1
if i != len(lowercase__ ) - 1:
lowerCAmelCase_ :List[str] = f"""up_blocks.{i}.upsamplers.0"""
lowerCAmelCase_ :Tuple = f"""output_blocks.{current_layer-1}.2"""
lowerCAmelCase_ :List[str] = convert_resnet(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[Any] = checkpoint["""out.0.weight"""]
lowerCAmelCase_ :Optional[Any] = checkpoint["""out.0.bias"""]
lowerCAmelCase_ :Optional[int] = checkpoint["""out.2.weight"""]
lowerCAmelCase_ :Optional[Any] = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = strabool(args.class_cond)
__UpperCAmelCase = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
__UpperCAmelCase = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCAmelCase = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__UpperCAmelCase = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
__UpperCAmelCase = None
__UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config)
__UpperCAmelCase = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__UpperCAmelCase = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
__UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config)
__UpperCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
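# A minimal sketch (added illustration) of the checkpoint-conversion pattern used
# throughout this script: old parameter names are rewritten prefix-by-prefix while
# the tensors themselves are carried over unchanged. Plain PyTorch; the prefixes in
# the example call are hypothetical.
import torch
def rename_prefix(checkpoint, old_prefix, new_prefix):
    renamed = {}
    for key, tensor in checkpoint.items():
        if key.startswith(old_prefix + "."):
            key = new_prefix + key[len(old_prefix):]
        renamed[key] = tensor
    return renamed
old_ckpt = {"input_blocks.0.0.weight": torch.zeros(3, 3)}
new_ckpt = rename_prefix(old_ckpt, "input_blocks.0.0", "conv_in")
assert "conv_in.weight" in new_ckpt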
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
import pprint
import requests
__UpperCAmelCase = 'https://zenquotes.io/api'
def _snake_case ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _snake_case ( ) -> list:
'''simple docstring'''
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
__UpperCAmelCase = random_quotes()
pprint.pprint(response)
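# A sketch (added illustration) of a more defensive variant of the calls above:
# adding a request timeout and raising on HTTP errors before decoding JSON. The
# endpoint constant mirrors the one used above; availability of the live service is
# not guaranteed here.
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"
def fetch_quotes(path: str) -> list:
    response = requests.get(API_ENDPOINT_URL + path, timeout=10)
    response.raise_for_status()
    return response.json()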
| 1 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculating the hash of pattern and substring of text
    for i in range(p_len ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
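# A worked sketch (added illustration) of the rolling-hash update used above: the
# hash of the next window is derived from the current one by removing the leading
# character's contribution (scaled by alphabet_size**(p_len - 1), i.e.
# modulus_power) and appending the trailing character. The constants match the
# snippet above; the example string is hypothetical.
alphabet_size, modulus = 256, 1_000_003
def window_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h
text, p_len = "abcd", 3
modulus_power = pow(alphabet_size, p_len - 1, modulus)
h = window_hash(text[0:3])  # hash of "abc"
rolled = ((h - ord(text[0]) * modulus_power) * alphabet_size + ord(text[3])) % modulus
assert rolled == window_hash(text[1:4])  # equals the hash of "bcd"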
| 1 | 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : int ) -> float:
'''simple docstring'''
if principal <= 0:
raise Exception("""Principal borrowed must be > 0""" )
if rate_per_annum < 0:
raise Exception("""Rate of interest must be >= 0""" )
if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ):
raise Exception("""Years to repay must be an integer > 0""" )
# Yearly rate is divided by 12 to get monthly rate
lowerCAmelCase_ :Any = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
lowerCAmelCase_ :Optional[int] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
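# A worked instance (added illustration) of the amortisation formula above, assuming
# a hypothetical loan of 25_000 at 8% per annum over 5 years: the monthly rate is
# r = 0.08 / 12 over n = 60 payments, and EMI = P * r * (1 + r)**n / ((1 + r)**n - 1),
# which comes out to roughly 506.91.
principal, rate_per_annum, years = 25_000, 0.08, 5
r, n = rate_per_annum / 12, years * 12
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
assert round(emi, 2) == 506.91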
| 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
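# A minimal single-process sketch (added illustration) of what LocalSGD adds on top
# of the loop above: optimizer steps run locally, and every `local_sgd_steps` steps
# the model parameters would be averaged across workers. The all-reduce is mocked
# with an identity function so the sketch stays runnable without a distributed
# launcher; in a real multi-worker run it would be a torch.distributed all-reduce
# followed by division by the world size.
import torch
def mock_average_across_workers(param):
    return param  # stand-in for the cross-worker parameter average
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
local_sgd_steps = 8
for step in range(32):
    loss = model(torch.randn(16, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (step + 1) % local_sgd_steps == 0:  # LocalSGD synchronisation point
        with torch.no_grad():
            for p in model.parameters():
                p.copy_(mock_average_across_workers(p))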
| 1 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
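# A minimal sketch (added illustration) of the processor pattern these tests
# exercise: one object routes text to a tokenizer and audio to a feature extractor,
# and its `model_input_names` is the concatenation of its components' names, which
# is why the test above slices off the tokenizer's two inputs. The toy classes are
# hypothetical stand-ins, not the real Clap components.
class ToyTokenizer:
    model_input_names = ["input_ids", "attention_mask"]
    def __call__(self, text):
        return {"input_ids": [ord(c) for c in text]}
class ToyFeatureExtractor:
    model_input_names = ["input_features"]
    def __call__(self, audios):
        return {"input_features": audios}
class ToyProcessor:
    def __init__(self, tokenizer, feature_extractor):
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor
        self.model_input_names = tokenizer.model_input_names + feature_extractor.model_input_names
    def __call__(self, text=None, audios=None):
        out = {}
        if text is not None:
            out.update(self.tokenizer(text))
        if audios is not None:
            out.update(self.feature_extractor(audios))
        return out
proc = ToyProcessor(ToyTokenizer(), ToyFeatureExtractor())
assert proc.model_input_names[2:] == ["input_features"]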
| 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
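# A minimal sketch (added illustration) of the checkpoint-resume bookkeeping above:
# the epoch index is recovered from a folder name of the form "epoch_<n>" and
# training resumes at n + 1. The digit-by-digit loop in the script is equivalent to
# this helper for well-formed names; the example path is hypothetical.
def starting_epoch_from_checkpoint(path: str) -> int:
    epoch_string = path.split("epoch_")[1]
    digits = ""
    for char in epoch_string:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits) + 1
assert starting_epoch_from_checkpoint("outputs/epoch_11") == 12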
| 1 | 1 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _snake_case ( lowercase__ : Any , lowercase__ : List[Any]=1 ) -> List[str]:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int]=0 ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = []
for old_item in old_list:
lowerCAmelCase_ :Dict = old_item.replace("""in_layers.0""" , """norm1""" )
lowerCAmelCase_ :Dict = new_item.replace("""in_layers.2""" , """conv1""" )
lowerCAmelCase_ :Union[str, Any] = new_item.replace("""out_layers.0""" , """norm2""" )
lowerCAmelCase_ :str = new_item.replace("""out_layers.3""" , """conv2""" )
lowerCAmelCase_ :List[str] = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
lowerCAmelCase_ :Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
lowerCAmelCase_ :Dict = shave_segments(lowercase__ , n_shave_prefix_segments=lowercase__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _snake_case ( lowercase__ : Any , lowercase__ : List[str]=0 ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = []
for old_item in old_list:
lowerCAmelCase_ :Optional[int] = old_item
lowerCAmelCase_ :Any = new_item.replace("""norm.weight""" , """group_norm.weight""" )
lowerCAmelCase_ :Optional[int] = new_item.replace("""norm.bias""" , """group_norm.bias""" )
lowerCAmelCase_ :Union[str, Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
lowerCAmelCase_ :int = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
lowerCAmelCase_ :Optional[Any] = shave_segments(lowercase__ , n_shave_prefix_segments=lowercase__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _snake_case ( lowercase__ : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Optional[int]=None , lowercase__ : str=None , lowercase__ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
assert isinstance(lowercase__ , lowercase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCAmelCase_ :List[Any] = old_checkpoint[path]
lowerCAmelCase_ :Optional[Any] = old_tensor.shape[0] // 3
lowerCAmelCase_ :Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCAmelCase_ :int = old_tensor.shape[0] // config["""num_head_channels"""] // 3
lowerCAmelCase_ :Tuple = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Dict = old_tensor.split(channels // num_heads , dim=1 )
lowerCAmelCase_ :str = query.reshape(lowercase__ )
lowerCAmelCase_ :int = key.reshape(lowercase__ )
lowerCAmelCase_ :List[str] = value.reshape(lowercase__ )
for path in paths:
lowerCAmelCase_ :int = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCAmelCase_ :Tuple = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
lowerCAmelCase_ :Dict = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
lowerCAmelCase_ :int = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCAmelCase_ :List[Any] = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCAmelCase_ :Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
lowerCAmelCase_ :str = old_checkpoint[path["""old"""]]
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Tuple = {}
lowerCAmelCase_ :List[Any] = checkpoint["""time_embed.0.weight"""]
lowerCAmelCase_ :Tuple = checkpoint["""time_embed.0.bias"""]
lowerCAmelCase_ :int = checkpoint["""time_embed.2.weight"""]
lowerCAmelCase_ :Optional[int] = checkpoint["""time_embed.2.bias"""]
lowerCAmelCase_ :Tuple = checkpoint["""input_blocks.0.0.weight"""]
lowerCAmelCase_ :Tuple = checkpoint["""input_blocks.0.0.bias"""]
lowerCAmelCase_ :Tuple = checkpoint["""out.0.weight"""]
lowerCAmelCase_ :str = checkpoint["""out.0.bias"""]
lowerCAmelCase_ :Dict = checkpoint["""out.2.weight"""]
lowerCAmelCase_ :int = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
lowerCAmelCase_ :Dict = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
lowerCAmelCase_ :Optional[int] = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowercase__ )
}
# Retrieves the keys for the middle blocks only
lowerCAmelCase_ :Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
lowerCAmelCase_ :Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(lowercase__ )
}
# Retrieves the keys for the output blocks only
lowerCAmelCase_ :List[Any] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
lowerCAmelCase_ :Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowercase__ )
}
for i in range(1 , lowercase__ ):
lowerCAmelCase_ :Any = (i - 1) // (config["""num_res_blocks"""] + 1)
lowerCAmelCase_ :Any = (i - 1) % (config["""num_res_blocks"""] + 1)
lowerCAmelCase_ :Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
lowerCAmelCase_ :Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
lowerCAmelCase_ :Tuple = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
lowerCAmelCase_ :Optional[Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
lowerCAmelCase_ :Tuple = renew_resnet_paths(lowercase__ )
lowerCAmelCase_ :int = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
lowerCAmelCase_ :Dict = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path, resnet_op] , config=lowercase__ )
if len(lowercase__ ):
lowerCAmelCase_ :Optional[Any] = renew_attention_paths(lowercase__ )
lowerCAmelCase_ :List[Any] = {
"""old""": f"""input_blocks.{i}.1""",
"""new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowerCAmelCase_ :List[str] = {
f"""input_blocks.{i}.1.qkv.bias""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , attention_paths_to_split=lowercase__ , config=lowercase__ , )
lowerCAmelCase_ :List[str] = middle_blocks[0]
lowerCAmelCase_ :int = middle_blocks[1]
lowerCAmelCase_ :Tuple = middle_blocks[2]
lowerCAmelCase_ :Dict = renew_resnet_paths(lowercase__ )
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , config=lowercase__ )
lowerCAmelCase_ :List[str] = renew_resnet_paths(lowercase__ )
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , config=lowercase__ )
lowerCAmelCase_ :Optional[int] = renew_attention_paths(lowercase__ )
lowerCAmelCase_ :List[Any] = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , attention_paths_to_split=lowercase__ , config=lowercase__ )
for i in range(lowercase__ ):
lowerCAmelCase_ :str = i // (config["""num_res_blocks"""] + 1)
lowerCAmelCase_ :str = i % (config["""num_res_blocks"""] + 1)
lowerCAmelCase_ :List[Any] = [shave_segments(lowercase__ , 2 ) for name in output_blocks[i]]
lowerCAmelCase_ :Optional[Any] = {}
for layer in output_block_layers:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = layer.split(""".""" )[0], shave_segments(lowercase__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowercase__ )
else:
lowerCAmelCase_ :Dict = [layer_name]
if len(lowercase__ ) > 1:
lowerCAmelCase_ :str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
lowerCAmelCase_ :Dict = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
lowerCAmelCase_ :List[str] = renew_resnet_paths(lowercase__ )
lowerCAmelCase_ :Optional[Any] = renew_resnet_paths(lowercase__ )
lowerCAmelCase_ :List[str] = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , config=lowercase__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
lowerCAmelCase_ :int = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
lowerCAmelCase_ :int = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
lowerCAmelCase_ :Optional[Any] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowercase__ ) == 2:
lowerCAmelCase_ :str = []
if len(lowercase__ ):
lowerCAmelCase_ :List[Any] = renew_attention_paths(lowercase__ )
lowerCAmelCase_ :int = {
"""old""": f"""output_blocks.{i}.1""",
"""new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
lowerCAmelCase_ :Optional[int] = {
f"""output_blocks.{i}.1.qkv.bias""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowercase__ , lowercase__ , lowercase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=lowercase__ , )
else:
lowerCAmelCase_ :List[str] = renew_resnet_paths(lowercase__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
lowerCAmelCase_ :Optional[int] = """.""".join(["""output_blocks""", str(lowercase__ ), path["""old"""]] )
lowerCAmelCase_ :Optional[int] = """.""".join(["""up_blocks""", str(lowercase__ ), """resnets""", str(lowercase__ ), path["""new"""]] )
lowerCAmelCase_ :Optional[int] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCAmelCase = json.loads(f.read())
__UpperCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCAmelCase = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
__UpperCAmelCase = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
__UpperCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
            # Don't modify the user's data in case they want to reuse it (e.g. in tests), because once we
            # have modified it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
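    # Illustrative lookup: for ds_key_long="zero_optimization.stage" this walks
    # config["zero_optimization"] and returns (that sub-dict, "stage"); if any
    # intermediate node is missing, it returns (None, ds_key) instead.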
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases, thus enabling a simple
# training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
| 1 | 1 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__UpperCAmelCase = logging.getLogger(__name__)
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=lowercase__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=lowercase__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=lowercase__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=lowercase__ , default=1_0_0_0 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=lowercase__ , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=lowercase__ , type=lowercase__ , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=lowercase__ , default=5_1_2 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=lowercase__ , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
lowerCAmelCase_ :Union[str, Any] = parser.parse_args()
return args
def _snake_case ( lowercase__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
def fn(lowercase__ : Tuple ):
return tokenizer(examples["""text"""] )
return fn
def _snake_case ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
lowerCAmelCase_ :Union[str, Any] = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
lowerCAmelCase_ :Optional[Any] = tf.train.Features(feature=lowercase__ )
lowerCAmelCase_ :str = tf.train.Example(features=lowercase__ )
lowerCAmelCase_ :Dict = example.SerializeToString()
records.append(lowercase__ )
return records
def _snake_case ( lowercase__ : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowerCAmelCase_ :Any = min(len(lowercase__ ) , args.limit )
lowerCAmelCase_ :List[str] = dataset.select(range(lowercase__ ) )
print(f"""Limiting the dataset to {args.limit} entries.""" )
lowerCAmelCase_ :str = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowerCAmelCase_ :Dict = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase__ ):
os.makedirs(lowercase__ )
else:
lowerCAmelCase_ :int = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowerCAmelCase_ :int = tokenize_function(lowercase__ )
lowerCAmelCase_ :str = dataset.map(lowercase__ , batched=lowercase__ , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase__ : Tuple ):
# Concatenate all texts.
lowerCAmelCase_ :Union[str, Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowerCAmelCase_ :Dict = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowerCAmelCase_ :List[str] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowerCAmelCase_ :List[Any] = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
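    # Illustrative effect of group_texts: with max_length=512, a batch whose
    # concatenated documents total 1300 tokens becomes two full 512-token
    # samples, and the trailing 276 tokens are dropped.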
lowerCAmelCase_ :List[Any] = dataset_tokenized.map(lowercase__ , batched=lowercase__ , batch_size=1_0_0_0 , num_proc=4 )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[Any] = 0
for shard in range(0 , len(lowercase__ ) , args.shard_size ):
lowerCAmelCase_ :int = grouped_dataset[shard : shard + args.shard_size]
lowerCAmelCase_ :int = len(dataset_snapshot["""input_ids"""] )
lowerCAmelCase_ :Dict = os.path.join(lowercase__ , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowerCAmelCase_ :Dict = get_serialized_examples(lowercase__ )
with tf.io.TFRecordWriter(lowercase__ ) as out_file:
for i in range(len(lowercase__ ) ):
lowerCAmelCase_ :Union[str, Any] = serialized_examples[i]
out_file.write(lowercase__ )
print("""Wrote file {} containing {} records""".format(lowercase__ , lowercase__ ) )
shard_count += 1
total_records += records_containing
with open(f"""split-{args.split}-records-count.txt""" , """w""" ) as f:
print(f"""Total {args.split} records: {total_records}""" , file=lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = parse_args()
main(args)
| 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
        lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
        # Binarize the logits in place: non-positive scores become 0, positive scores become 1
        array[array <= 0] = 0
        array[array > 0] = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__UpperCAmelCase = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__UpperCAmelCase = [ord(letter) for letter in string.ascii_lowercase]
__UpperCAmelCase = {ord(char) for char in VALID_CHARS}
__UpperCAmelCase = ["the", "be", "to", "of", "and", "in", "that", "have"]
def _snake_case ( lowercase__ : list[int] , lowercase__ : tuple[int, ...] ) -> str | None:
'''simple docstring'''
lowerCAmelCase_ :str = ""
lowerCAmelCase_ :int
lowerCAmelCase_ :int
lowerCAmelCase_ :int
for keychar, cipherchar in zip(cycle(lowercase__ ) , lowercase__ ):
lowerCAmelCase_ :Optional[int] = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(lowercase__ )
return decoded
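# Worked example: with key byte 103 (ord("g")) and cipher byte 6, the XOR
# 6 ^ 103 recovers 97 == ord("a"); any decoded byte outside VALID_INTS makes
# the candidate key fail immediately.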
def _snake_case ( lowercase__ : list[int] ) -> list[str]:
'''simple docstring'''
lowerCAmelCase_ :list[str] = []
for key in product(lowercase__ , repeat=3 ):
lowerCAmelCase_ :List[Any] = try_key(lowercase__ , lowercase__ )
if encoded is not None:
possibles.append(lowercase__ )
return possibles
def _snake_case ( lowercase__ : list[str] , lowercase__ : str ) -> list[str]:
'''simple docstring'''
return [possible for possible in possibles if common_word in possible.lower()]
def _snake_case ( lowercase__ : str = "p059_cipher.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase_ :list[int]
lowerCAmelCase_ :list[str]
lowerCAmelCase_ :str
lowerCAmelCase_ :str
lowerCAmelCase_ :str = Path(lowercase__ ).parent.joinpath(lowercase__ ).read_text(encoding="""utf-8""" )
lowerCAmelCase_ :List[str] = [int(lowercase__ ) for number in data.strip().split(""",""" )]
lowerCAmelCase_ :Dict = filter_valid_chars(lowercase__ )
for common_word in COMMON_WORDS:
lowerCAmelCase_ :Union[str, Any] = filter_common_word(lowercase__ , lowercase__ )
if len(lowercase__ ) == 1:
break
lowerCAmelCase_ :Optional[int] = possibles[0]
return sum(ord(lowercase__ ) for char in decoded_text )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
'''simple docstring'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
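    # Illustrative call, assuming the function is exposed as `knapsack` as the
    # recursive calls above suggest: items (value=3, weight=1) and
    # (value=5, weight=4) both fit under max_weight=5, so the best value is 8.
    # print(knapsack([3, 5], [1, 4], 2, 5, 0))  # -> 8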
| 1 | 1 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = RobertaPreLayerNormConfig.from_pretrained(
lowercase__ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
lowerCAmelCase_ :str = torch.load(hf_hub_download(repo_id=lowercase__ , filename="""pytorch_model.bin""" ) )
lowerCAmelCase_ :Dict = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
lowerCAmelCase_ :List[Any] = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
        # The original implementation contains weights that are not used; remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
lowerCAmelCase_ :Dict = tensor_value
lowerCAmelCase_ :Any = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowercase__ , config=lowercase__ , state_dict=lowercase__ )
model.save_pretrained(lowercase__ )
# convert tokenizer
lowerCAmelCase_ :Optional[int] = AutoTokenizer.from_pretrained(lowercase__ )
tokenizer.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
'''simple docstring'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index == 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
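# Illustrative usage, assuming this wrapper is exported as `tqdm`: iterating
# `tqdm(True, range(100))` renders a progress bar only on the local main
# process, which keeps multi-GPU runs from printing one bar per rank.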
| 1 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__UpperCAmelCase = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Tuple = "albert"
def __init__( self , __A=3_0000 , __A=128 , __A=4096 , __A=12 , __A=1 , __A=64 , __A=1_6384 , __A=1 , __A="gelu_new" , __A=0 , __A=0 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=0.1 , __A="absolute" , __A=0 , __A=2 , __A=3 , **__A , ) -> str:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
lowerCAmelCase_ :Tuple = vocab_size
lowerCAmelCase_ :Any = embedding_size
lowerCAmelCase_ :Dict = hidden_size
lowerCAmelCase_ :Optional[Any] = num_hidden_layers
lowerCAmelCase_ :Optional[int] = num_hidden_groups
lowerCAmelCase_ :List[str] = num_attention_heads
lowerCAmelCase_ :Optional[Any] = inner_group_num
lowerCAmelCase_ :Tuple = hidden_act
lowerCAmelCase_ :List[Any] = intermediate_size
lowerCAmelCase_ :int = hidden_dropout_prob
lowerCAmelCase_ :Any = attention_probs_dropout_prob
lowerCAmelCase_ :Any = max_position_embeddings
lowerCAmelCase_ :int = type_vocab_size
lowerCAmelCase_ :Optional[int] = initializer_range
lowerCAmelCase_ :int = layer_norm_eps
lowerCAmelCase_ :Any = classifier_dropout_prob
lowerCAmelCase_ :str = position_embedding_type
class _SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ :Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ :str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 1 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = "funnel"
UpperCAmelCase_ :Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self , __A=3_0522 , __A=[4, 4, 4] , __A=None , __A=2 , __A=768 , __A=12 , __A=64 , __A=3072 , __A="gelu_new" , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.1 , __A=None , __A=1E-9 , __A="mean" , __A="relative_shift" , __A=True , __A=True , __A=True , **__A , ) -> List[Any]:
lowerCAmelCase_ :int = vocab_size
lowerCAmelCase_ :List[str] = block_sizes
lowerCAmelCase_ :List[str] = [1] * len(__A ) if block_repeats is None else block_repeats
assert len(__A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
lowerCAmelCase_ :Tuple = num_decoder_layers
lowerCAmelCase_ :Dict = d_model
lowerCAmelCase_ :str = n_head
lowerCAmelCase_ :Dict = d_head
lowerCAmelCase_ :Optional[int] = d_inner
lowerCAmelCase_ :int = hidden_act
lowerCAmelCase_ :List[Any] = hidden_dropout
lowerCAmelCase_ :int = attention_dropout
lowerCAmelCase_ :Optional[Any] = activation_dropout
lowerCAmelCase_ :List[Any] = initializer_range
lowerCAmelCase_ :int = initializer_std
lowerCAmelCase_ :Any = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
lowerCAmelCase_ :Dict = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
lowerCAmelCase_ :str = attention_type
lowerCAmelCase_ :str = separate_cls
lowerCAmelCase_ :List[Any] = truncate_seq
lowerCAmelCase_ :Tuple = pool_q_only
super().__init__(**__A )
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return sum(self.block_sizes )
@num_hidden_layers.setter
def __lowerCAmelCase ( self , __A ) -> int:
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
return len(self.block_sizes )
@num_blocks.setter
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
| 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :str = GPTSanJapaneseTokenizer
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[int] = {"do_clean_text": False, "add_prefix_space": False}
def __lowerCAmelCase ( self ) -> Tuple:
super().setUp()
# fmt: off
lowerCAmelCase_ :Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowerCAmelCase_ :List[str] = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowerCAmelCase_ :int = {"""unk_token""": """<unk>"""}
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__A ) )
def __lowerCAmelCase ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__A )
def __lowerCAmelCase ( self , __A ) -> Dict:
lowerCAmelCase_ :List[Any] = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.get_input_output_texts(__A )
lowerCAmelCase_ :List[str] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCAmelCase_ :str = tokenizer.decode(__A , clean_up_tokenization_spaces=__A )
return text, ids
def __lowerCAmelCase ( self ) -> str:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> int:
pass # TODO add if relevant
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Union[str, Any] = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、世界。 こんばんは、㔺界。"""
lowerCAmelCase_ :Any = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowerCAmelCase_ :Tuple = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
lowerCAmelCase_ :List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowerCAmelCase_ :List[str] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
lowerCAmelCase_ :Any = tokens + [tokenizer.unk_token]
lowerCAmelCase_ :Union[str, Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowerCAmelCase_ :Union[str, Any] = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = self.get_tokenizer()
# Testing tokenization
lowerCAmelCase_ :Optional[int] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowerCAmelCase_ :str = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowerCAmelCase_ :str = tokenizer.encode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Any = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(prefix_text + input_text )
lowerCAmelCase_ :List[str] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowerCAmelCase_ :int = tokenizer.encode(__A , prefix_text=__A )
lowerCAmelCase_ :int = tokenizer.decode(__A )
lowerCAmelCase_ :Dict = tokenizer.decode(__A )
lowerCAmelCase_ :Tuple = tokenizer.decode(__A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
self.assertEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowerCAmelCase_ :List[Any] = """こんにちは、世界。"""
lowerCAmelCase_ :Optional[int] = """こんばんは、㔺界。😀"""
lowerCAmelCase_ :List[str] = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :Dict = len(tokenizer.encode(__A ) ) - 2
lowerCAmelCase_ :int = [1] + [0] * (len_prefix + len_text + 1)
lowerCAmelCase_ :List[Any] = [1] * (len_prefix + len_text + 1) + [0]
lowerCAmelCase_ :Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowerCAmelCase_ :List[Any] = tokenizer(prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[str] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowerCAmelCase_ :List[Any] = tokenizer(__A , prefix_text=__A ).token_type_ids
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
self.assertListEqual(__A , __A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = tokenizer.encode("""あンいワ""" )
lowerCAmelCase_ :Optional[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowerCAmelCase_ :int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertEqual(tokenizer.decode(__A ) , tokenizer.decode(__A ) )
self.assertNotEqual(__A , __A )
self.assertNotEqual(__A , __A )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowerCAmelCase_ :int = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowerCAmelCase_ :Dict = tokenizer(__A , padding=__A )
lowerCAmelCase_ :Any = tokenizer.batch_encode_plus(__A , padding=__A )
# fmt: off
lowerCAmelCase_ :int = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowerCAmelCase_ :List[str] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowerCAmelCase_ :int = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __A )
self.assertListEqual(x_token.token_type_ids , __A )
self.assertListEqual(x_token.attention_mask , __A )
self.assertListEqual(x_token_a.input_ids , __A )
self.assertListEqual(x_token_a.token_type_ids , __A )
self.assertListEqual(x_token_a.attention_mask , __A )
def __lowerCAmelCase ( self ) -> Tuple:
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def __lowerCAmelCase ( self ) -> str:
# tokenizer has no padding token
pass
| 1 | 1 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
__UpperCAmelCase = 3_00 # TEMPERATURE (unit = K)
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> float:
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
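    # Illustrative check of kT/q * ln(Nd * Na / ni^2): a silicon-like junction
    # with donor_conc=1e17, acceptor_conc=1e17 and intrinsic_conc=1.5e10 gives a
    # built-in voltage of roughly 0.81 V at T = 300 K.
    print(_snake_case(1e17, 1e17, 1.5e10))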
| 1 |
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
__UpperCAmelCase = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
__UpperCAmelCase = dataset.iloc[:, 1:2].values
__UpperCAmelCase = dataset.iloc[:, 2].values
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = train_test_split(X, y, test_size=0.2, random_state=0)
__UpperCAmelCase = PolynomialFeatures(degree=4)
__UpperCAmelCase = poly_reg.fit_transform(X)
__UpperCAmelCase = LinearRegression()
pol_reg.fit(X_poly, y)
def _snake_case ( ) -> str:
'''simple docstring'''
plt.scatter(lowercase__ , lowercase__ , color="""red""" )
plt.plot(lowercase__ , pol_reg.predict(poly_reg.fit_transform(lowercase__ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 1 | 1 |
"""simple docstring"""
import numpy as np
__UpperCAmelCase = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class _SCREAMING_SNAKE_CASE :
def __init__( self ) -> None:
lowerCAmelCase_ :int = np.array(__A )
def __lowerCAmelCase ( self , __A ) -> np.ndarray:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = np.where(letter == self.SQUARE )
lowerCAmelCase_ :Any = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __lowerCAmelCase ( self , __A , __A ) -> str:
lowerCAmelCase_ :str = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :Union[str, Any] = message.lower()
lowerCAmelCase_ :int = message.replace(""" """ , """""" )
lowerCAmelCase_ :str = message.replace("""j""" , """i""" )
lowerCAmelCase_ :Union[str, Any] = np.empty((2, len(__A )) )
for letter_index in range(len(__A ) ):
lowerCAmelCase_ :Any = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase_ :Tuple = numbers[0]
lowerCAmelCase_ :Dict = numbers[1]
lowerCAmelCase_ :List[str] = first_step.reshape(2 * len(__A ) )
lowerCAmelCase_ :Tuple = """"""
for numbers_index in range(len(__A ) ):
lowerCAmelCase_ :Tuple = int(second_step[numbers_index * 2] )
lowerCAmelCase_ :List[Any] = int(second_step[(numbers_index * 2) + 1] )
lowerCAmelCase_ :List[str] = self.numbers_to_letter(__A , __A )
lowerCAmelCase_ :Optional[Any] = encoded_message + letter
return encoded_message
def __lowerCAmelCase ( self , __A ) -> str:
lowerCAmelCase_ :List[str] = message.lower()
message.replace(""" """ , """""" )
lowerCAmelCase_ :Optional[Any] = np.empty(2 * len(__A ) )
for letter_index in range(len(__A ) ):
lowerCAmelCase_ :Dict = self.letter_to_numbers(message[letter_index] )
lowerCAmelCase_ :Union[str, Any] = numbers[0]
lowerCAmelCase_ :List[Any] = numbers[1]
lowerCAmelCase_ :List[Any] = first_step.reshape((2, len(__A )) )
lowerCAmelCase_ :int = """"""
for numbers_index in range(len(__A ) ):
lowerCAmelCase_ :List[Any] = int(second_step[0, numbers_index] )
lowerCAmelCase_ :List[str] = int(second_step[1, numbers_index] )
lowerCAmelCase_ :Union[str, Any] = self.numbers_to_letter(__A , __A )
lowerCAmelCase_ :Optional[Any] = decoded_message + letter
return decoded_message
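# How the Bifid steps above fit together: encoding maps each letter to its
# (row, column) coordinates in the 5x5 Polybius square, writes all row indices
# before all column indices, then re-pairs the flattened digit stream into
# letters; decoding reshapes the stream back into two rows to undo that
# transposition.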
| 1 |
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = 1.6021e-19 # units = C
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float , ) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( lowercase__ : int , lowercase__ : int ) -> list[str]:
'''simple docstring'''
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
lowerCAmelCase_ :Optional[int] = number_of_bytes // partitions
lowerCAmelCase_ :Any = []
for i in range(lowercase__ ):
lowerCAmelCase_ :List[str] = i * bytes_per_partition + 1
lowerCAmelCase_ :Optional[int] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f"""{start_bytes}-{end_bytes}""" )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
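    # Illustrative run: 100 bytes split across 3 partitions yields
    # ['1-33', '34-66', '67-100']; the final partition absorbs the remainder.
    print(_snake_case(1_0_0, 3))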
| 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( lowercase__ : Dict , lowercase__ : int , lowercase__ : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = WavaVecaForSequenceClassification.from_pretrained(lowercase__ , config=lowercase__ )
lowerCAmelCase_ :int = downstream_dict["""projector.weight"""]
lowerCAmelCase_ :Union[str, Any] = downstream_dict["""projector.bias"""]
lowerCAmelCase_ :Union[str, Any] = downstream_dict["""model.post_net.linear.weight"""]
lowerCAmelCase_ :Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = WavaVecaForAudioFrameClassification.from_pretrained(lowercase__ , config=lowercase__ )
lowerCAmelCase_ :int = downstream_dict["""model.linear.weight"""]
lowerCAmelCase_ :Union[str, Any] = downstream_dict["""model.linear.bias"""]
return model
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : str ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = WavaVecaForXVector.from_pretrained(lowercase__ , config=lowercase__ )
lowerCAmelCase_ :List[str] = downstream_dict["""connector.weight"""]
lowerCAmelCase_ :List[str] = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
lowerCAmelCase_ :str = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
lowerCAmelCase_ :int = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
lowerCAmelCase_ :Optional[int] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
lowerCAmelCase_ :List[Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
lowerCAmelCase_ :Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
lowerCAmelCase_ :str = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
lowerCAmelCase_ :Optional[Any] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : List[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location="""cpu""" )
lowerCAmelCase_ :Optional[Any] = checkpoint["""Downstream"""]
lowerCAmelCase_ :Tuple = WavaVecaConfig.from_pretrained(lowercase__ )
lowerCAmelCase_ :Dict = WavaVecaFeatureExtractor.from_pretrained(
lowercase__ , return_attention_mask=lowercase__ , do_normalize=lowercase__ )
lowerCAmelCase_ :Union[str, Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
lowerCAmelCase_ :Optional[Any] = convert_classification(lowercase__ , lowercase__ , lowercase__ )
elif arch.endswith("""ForAudioFrameClassification""" ):
lowerCAmelCase_ :List[Any] = convert_diarization(lowercase__ , lowercase__ , lowercase__ )
elif arch.endswith("""ForXVector""" ):
lowerCAmelCase_ :int = convert_xvector(lowercase__ , lowercase__ , lowercase__ )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
lowerCAmelCase_ :str = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(lowercase__ )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
__UpperCAmelCase = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
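# Illustrative command-line usage (a sketch, not part of the original script; the
# file name and all paths below are hypothetical placeholders):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model
#
# The script rewrites the S3PRL downstream head weights into the matching
# Wav2Vec2 head and saves both the model and the feature extractor to the dump path.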
| 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _snake_case ( lowercase__ : str = "laptop" ) -> DataFrame:
'''simple docstring'''
lowerCAmelCase_ :Dict = f"""https://www.amazon.in/laptop/s?k={product}"""
lowerCAmelCase_ :List[str] = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
lowerCAmelCase_ :List[Any] = BeautifulSoup(requests.get(lowercase__ , headers=lowercase__ ).text )
# Initialize a Pandas dataframe with the column titles
lowerCAmelCase_ :Union[str, Any] = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
] )
    # Loop through each entry and store it in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
try:
lowerCAmelCase_ :str = item.ha.text
lowerCAmelCase_ :Dict = """https://www.amazon.in/""" + item.ha.a["""href"""]
lowerCAmelCase_ :int = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
try:
lowerCAmelCase_ :Dict = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
except AttributeError:
lowerCAmelCase_ :int = """Not available"""
try:
lowerCAmelCase_ :str = (
"""₹"""
+ item.find(
"""span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
)
except AttributeError:
lowerCAmelCase_ :Optional[Any] = """"""
try:
lowerCAmelCase_ :str = float(
(
(
float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
- float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
)
/ float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
)
* 1_0_0 )
except ValueError:
lowerCAmelCase_ :Union[str, Any] = float("""nan""" )
except AttributeError:
pass
lowerCAmelCase_ :Any = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
lowerCAmelCase_ :List[Any] = """ """
lowerCAmelCase_ :Tuple = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
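# Worked example of the discount formula used above (made-up prices, not scraped
# data): with an MRP of ₹1,000 and a current price of ₹750,
#
#   >>> ((1000.0 - 750.0) / 1000.0) * 100
#   25.0
#
# i.e. the product is listed at a 25% discount off its MRP.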
| 1 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'tokenizer_file': {
'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json',
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json',
},
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = VOCAB_FILES_NAMES
UpperCAmelCase_ :int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :Any = ["input_ids", "attention_mask"]
UpperCAmelCase_ :List[Any] = None
def __init__( self , __A=None , __A=None , __A=None , __A="<unk>" , __A="<s>" , __A="</s>" , __A="<pad>" , __A=False , __A=False , **__A , ) -> Optional[int]:
super().__init__(
__A , __A , tokenizer_file=__A , unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , add_prefix_space=__A , clean_up_tokenization_spaces=__A , **__A , )
lowerCAmelCase_ :str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __A ) != add_prefix_space:
lowerCAmelCase_ :Optional[int] = getattr(__A , pre_tok_state.pop("""type""" ) )
lowerCAmelCase_ :Union[str, Any] = add_prefix_space
lowerCAmelCase_ :Any = pre_tok_class(**__A )
lowerCAmelCase_ :Dict = add_prefix_space
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :str = kwargs.get("""is_split_into_words""" , __A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , *__A , **__A ) -> BatchEncoding:
lowerCAmelCase_ :int = kwargs.get("""is_split_into_words""" , __A )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
""" pretokenized inputs.""" )
return super()._encode_plus(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
lowerCAmelCase_ :Tuple = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
def __lowerCAmelCase ( self , __A ) -> List[int]:
lowerCAmelCase_ :List[Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__A , add_special_tokens=__A ) + [self.eos_token_id] )
if len(__A ) > self.model_max_length:
lowerCAmelCase_ :List[str] = input_ids[-self.model_max_length :]
return input_ids
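# Minimal usage sketch (an assumption about the public API, not part of this file;
# the checkpoint name is illustrative):
#
#   >>> from transformers import BloomTokenizerFast
#   >>> tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#   >>> ids = tok("Hello world").input_ids
#   >>> tok.decode(ids)
#   'Hello world'
#
# Byte-level BPE round-trips plain text, so encode followed by decode typically
# returns the original string.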
| 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = """laion/clap-htsat-unfused"""
lowerCAmelCase_ :Optional[Any] = tempfile.mkdtemp()
def __lowerCAmelCase ( self , **__A ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self , **__A ) -> Tuple:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )
def __lowerCAmelCase ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[Any] = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ :str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase_ :Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
lowerCAmelCase_ :Union[str, Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = self.get_feature_extractor()
lowerCAmelCase_ :str = self.get_tokenizer()
lowerCAmelCase_ :List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :Optional[Any] = floats_list((3, 1000) )
lowerCAmelCase_ :Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
lowerCAmelCase_ :str = processor(audios=__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :List[Any] = """This is a test string"""
lowerCAmelCase_ :Dict = processor(text=__A )
lowerCAmelCase_ :List[str] = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.get_feature_extractor()
lowerCAmelCase_ :Tuple = self.get_tokenizer()
lowerCAmelCase_ :Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
lowerCAmelCase_ :int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ :Tuple = processor.batch_decode(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.get_feature_extractor()
lowerCAmelCase_ :Any = self.get_tokenizer()
lowerCAmelCase_ :Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
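# Minimal end-to-end sketch of the processor under test (illustrative; the
# checkpoint name and waveform are placeholders):
#
#   >>> import numpy as np
#   >>> from transformers import ClapProcessor
#   >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   >>> inputs = processor(text=["a dog barking"], audios=[np.zeros(48_000)],
#   ...                    sampling_rate=48_000, return_tensors="pt")
#
# `inputs` then carries both the tokenized text and the audio features expected
# by a CLAP model.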
| 1 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = 1
lowerCAmelCase_ :List[Any] = 3
lowerCAmelCase_ :List[Any] = (32, 32)
lowerCAmelCase_ :Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__A )
return image
@property
def __lowerCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__A , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __lowerCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
return CLIPTextModel(__A )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :List[str] = self.dummy_cond_unet_upscale
lowerCAmelCase_ :List[str] = DDPMScheduler()
lowerCAmelCase_ :Tuple = DDIMScheduler(prediction_type="""v_prediction""" )
lowerCAmelCase_ :Any = self.dummy_vae
lowerCAmelCase_ :Any = self.dummy_text_encoder
lowerCAmelCase_ :Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :Tuple = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
lowerCAmelCase_ :Optional[Any] = StableDiffusionUpscalePipeline(
unet=__A , low_res_scheduler=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , max_noise_level=350 , )
lowerCAmelCase_ :List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Tuple = torch.Generator(device=__A ).manual_seed(0 )
lowerCAmelCase_ :Any = sd_pipe(
[prompt] , image=__A , generator=__A , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase_ :int = output.images
lowerCAmelCase_ :Optional[Any] = torch.Generator(device=__A ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = sd_pipe(
[prompt] , image=__A , generator=__A , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__A , )[0]
lowerCAmelCase_ :Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ :str = image_from_tuple[0, -3:, -3:, -1]
lowerCAmelCase_ :Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCAmelCase_ :Tuple = np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ :Any = self.dummy_cond_unet_upscale
lowerCAmelCase_ :str = DDPMScheduler()
lowerCAmelCase_ :Dict = DDIMScheduler(prediction_type="""v_prediction""" )
lowerCAmelCase_ :Dict = self.dummy_vae
lowerCAmelCase_ :str = self.dummy_text_encoder
lowerCAmelCase_ :Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :Optional[int] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
lowerCAmelCase_ :List[str] = StableDiffusionUpscalePipeline(
unet=__A , low_res_scheduler=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , max_noise_level=350 , )
lowerCAmelCase_ :Any = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[int] = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Union[str, Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase_ :Any = output.images
assert image.shape[0] == 2
lowerCAmelCase_ :int = torch.Generator(device=__A ).manual_seed(0 )
lowerCAmelCase_ :Dict = sd_pipe(
[prompt] , image=__A , generator=__A , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
lowerCAmelCase_ :List[str] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Optional[int] = self.dummy_cond_unet_upscale
lowerCAmelCase_ :str = DDPMScheduler()
lowerCAmelCase_ :str = DDIMScheduler(prediction_type="""v_prediction""" )
lowerCAmelCase_ :int = self.dummy_vae
lowerCAmelCase_ :int = self.dummy_text_encoder
lowerCAmelCase_ :Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :str = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCAmelCase_ :Optional[Any] = unet.half()
lowerCAmelCase_ :Optional[int] = text_encoder.half()
        # assemble the upscale pipeline from the dummy components
lowerCAmelCase_ :Optional[int] = StableDiffusionUpscalePipeline(
unet=__A , low_res_scheduler=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , max_noise_level=350 , )
lowerCAmelCase_ :List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Dict = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Optional[int] = torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = sd_pipe(
[prompt] , image=__A , generator=__A , num_inference_steps=2 , output_type="""np""" , ).images
lowerCAmelCase_ :Dict = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCAmelCase_ :List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
lowerCAmelCase_ :Optional[Any] = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCAmelCase_ :List[Any] = StableDiffusionUpscalePipeline.from_pretrained(__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowerCAmelCase_ :List[str] = """a cat sitting on a park bench"""
lowerCAmelCase_ :Tuple = torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = pipe(
prompt=__A , image=__A , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCAmelCase_ :int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
lowerCAmelCase_ :List[str] = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCAmelCase_ :int = StableDiffusionUpscalePipeline.from_pretrained(
__A , torch_dtype=torch.floataa , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowerCAmelCase_ :List[str] = """a cat sitting on a park bench"""
lowerCAmelCase_ :Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = pipe(
prompt=__A , image=__A , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :List[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowerCAmelCase ( self ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ :Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
lowerCAmelCase_ :Dict = """stabilityai/stable-diffusion-x4-upscaler"""
lowerCAmelCase_ :List[str] = StableDiffusionUpscalePipeline.from_pretrained(
__A , torch_dtype=torch.floataa , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ :Dict = """a cat sitting on a park bench"""
lowerCAmelCase_ :List[str] = torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = pipe(
prompt=__A , image=__A , generator=__A , num_inference_steps=5 , output_type="""np""" , )
lowerCAmelCase_ :Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
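# End-user sketch of the pipeline exercised by these tests (illustrative; needs a
# GPU and downloads the real checkpoint; prompt and image are placeholders):
#
#   >>> import torch
#   >>> from diffusers import StableDiffusionUpscalePipeline
#   >>> pipe = StableDiffusionUpscalePipeline.from_pretrained(
#   ...     "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16)
#   >>> pipe = pipe.to("cuda")
#   >>> upscaled = pipe(prompt="a cat sitting on a park bench",
#   ...                 image=low_res_image).images[0]  # 4x the input resolution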
| 1 |
"""simple docstring"""
import os
from math import logaa
def _snake_case ( lowercase__ : str = "base_exp.txt" ) -> int:
'''simple docstring'''
lowerCAmelCase_ :float = 0
lowerCAmelCase_ :Union[str, Any] = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = list(map(lowercase__ , line.split(""",""" ) ) )
if x * logaa(lowercase__ ) > largest:
lowerCAmelCase_ :Any = x * logaa(lowercase__ )
lowerCAmelCase_ :List[Any] = i + 1
return result
if __name__ == "__main__":
print(solution())
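# Why comparing x * log10(base) works (a worked example, not from the original
# file): taking logarithms ranks the powers without evaluating them. For instance
#
#   >>> from math import log10
#   >>> 7 * log10(3) > 11 * log10(2)
#   True
#
# (approximately 3.3398 vs 3.3113), so 3**7 = 2187 exceeds 2**11 = 2048,
# matching the direct comparison.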
| 1 | 1 |
"""simple docstring"""
import itertools
import math
def _snake_case ( lowercase__ : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _snake_case ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = 2
while True:
if is_prime(lowercase__ ):
yield num
num += 1
def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int:
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
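# Sanity check (illustrative, using the call-site names above):
#
#   >>> list(itertools.islice(prime_generator(), 6))
#   [2, 3, 5, 7, 11, 13]
#   >>> solution(6)
#   13
#
# so solution() with the default argument returns the 10001st prime, as asked by
# Project Euler problem 7.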
| 1 |
"""simple docstring"""
import itertools
import math
def _snake_case ( lowercase__ : int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _snake_case ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = 2
while True:
if is_prime(lowercase__ ):
yield num
num += 1
def _snake_case ( lowercase__ : int = 1_0_0_0_1 ) -> int:
'''simple docstring'''
return next(itertools.islice(prime_generator() , nth - 1 , lowercase__ ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
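# How the lazy module behaves (an illustrative note, not part of the original
# file): names listed in `_import_structure` are resolved on first attribute
# access, so e.g.
#
#   >>> import transformers
#   >>> transformers.RemBertModel  # the torch-backed module is imported only now
#
# keeps `import transformers` cheap and usable even when optional backends
# (sentencepiece, tokenizers, torch, tensorflow) are missing.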
| 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : int = 5_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
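# Hand-checked small cases of the block-filling count (assuming the `+= 1` inside
# the block-length loop counts the placement of a block flush against the row's
# right end):
#
#   >>> solution(3), solution(4), solution(5)
#   (2, 4, 7)
#
# matching Project Euler 114: a length-3 row admits only the empty filling or a
# single 3-block, and each extra cell adds new 3- and 4-block placements.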
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__UpperCAmelCase = numpy.array([0, 0])
__UpperCAmelCase = numpy.array([0.5, 0.8660254])
__UpperCAmelCase = numpy.array([1, 0])
__UpperCAmelCase = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def _snake_case ( lowercase__ : list[numpy.ndarray] , lowercase__ : int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = initial_vectors
for _ in range(lowercase__ ):
lowerCAmelCase_ :List[Any] = iteration_step(lowercase__ )
return vectors
def _snake_case ( lowercase__ : list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = []
for i, start_vector in enumerate(vectors[:-1] ):
lowerCAmelCase_ :List[Any] = vectors[i + 1]
new_vectors.append(lowercase__ )
lowerCAmelCase_ :str = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def _snake_case ( lowercase__ : numpy.ndarray , lowercase__ : float ) -> numpy.ndarray:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = numpy.radians(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = numpy.cos(lowercase__ ), numpy.sin(lowercase__ )
lowerCAmelCase_ :Tuple = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowercase__ , lowercase__ )
def _snake_case ( lowercase__ : list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowerCAmelCase_ :Tuple = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowerCAmelCase_ , lowerCAmelCase_ :Any = zip(*lowercase__ )
plt.plot(lowercase__ , lowercase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
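# Quick check of the rotation helper (illustrative; values exact up to floating
# point rounding): a 90-degree rotation sends the unit x-vector to the unit
# y-vector,
#
#   >>> rotate(numpy.array([1, 0]), 90)
#   array([6.123234e-17, 1.000000e+00])
#
# and each iteration step replaces every edge with four shorter ones, so
# iterate(INITIAL_VECTORS, 1) yields 3 * 4 + 1 = 13 points.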
| 1 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
UpperCAmelCase_ :Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :List[Any] = CLIPTextModel(__A )
lowerCAmelCase_ :int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Union[str, Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> List[str]:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Tuple = torch.manual_seed(__A )
else:
lowerCAmelCase_ :Optional[int] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :List[Any] = 2
lowerCAmelCase_ :int = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , )
lowerCAmelCase_ :Optional[int] = floats_tensor(control_image.shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> int:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase_ :int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase_ :str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase_ :int = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__A ):
if isinstance(__A , torch.nn.Convad ):
                torch.nn.init.normal_(__A.weight )
                __A.bias.data.fill_(1.0 )
lowerCAmelCase_ :List[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__A )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__A , set_alpha_to_one=__A , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowerCAmelCase_ :str = CLIPTextModel(__A )
lowerCAmelCase_ :str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ :Optional[Any] = MultiControlNetModel([controlneta, controlneta] )
lowerCAmelCase_ :List[Any] = {
"""unet""": unet,
"""controlnet""": controlnet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __lowerCAmelCase ( self , __A , __A=0 ) -> str:
if str(__A ).startswith("""mps""" ):
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(__A )
else:
lowerCAmelCase_ :List[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCAmelCase_ :Optional[Any] = 2
lowerCAmelCase_ :Optional[int] = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__A , device=torch.device(__A ) , ),
]
lowerCAmelCase_ :int = floats_tensor(control_image[0].shape , rng=random.Random(__A ) ).to(__A )
lowerCAmelCase_ :Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ :List[Any] = Image.fromarray(np.uinta(__A ) ).convert("""RGB""" ).resize((64, 64) )
lowerCAmelCase_ :List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""image""": image,
"""control_image""": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
lowerCAmelCase_ :Union[str, Any] = 1_0.0
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :Tuple = self.get_dummy_inputs(__A )
lowerCAmelCase_ :List[str] = steps
lowerCAmelCase_ :int = scale
lowerCAmelCase_ :Union[str, Any] = pipe(**__A )[0]
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = steps
lowerCAmelCase_ :str = scale
lowerCAmelCase_ :Tuple = pipe(**__A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = steps
lowerCAmelCase_ :Union[str, Any] = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
lowerCAmelCase_ :List[str] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = steps
lowerCAmelCase_ :Tuple = scale
lowerCAmelCase_ :str = pipe(**__A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __lowerCAmelCase ( self ) -> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" )
lowerCAmelCase_ :int = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , safety_checker=__A , controlnet=__A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowerCAmelCase_ :List[Any] = """evil space-punk bird"""
lowerCAmelCase_ :List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) )
lowerCAmelCase_ :int = load_image(
"""https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) )
lowerCAmelCase_ :Union[str, Any] = pipe(
__A , __A , control_image=__A , generator=__A , output_type="""np""" , num_inference_steps=50 , strength=0.6 , )
lowerCAmelCase_ :Tuple = output.images[0]
assert image.shape == (512, 512, 3)
lowerCAmelCase_ :Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" )
assert np.abs(expected_image - image ).max() < 9E-2
| 1 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ ):
UpperCAmelCase_ :List[str] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1E-5 , __A = 0.0_2 , __A = True , __A = True , __A = False , __A = False , ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :List[str] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
f""" `n_embd`: {n_embd} are not equal.""" )
lowerCAmelCase_ :Union[str, Any] = prefix_inner_dim
lowerCAmelCase_ :str = prefix_hidden_dim
lowerCAmelCase_ :str = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCAmelCase_ :List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCAmelCase_ :Any = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCAmelCase_ :Any = GPTaLMHeadModel(__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , __A = None , ) -> List[str]:
lowerCAmelCase_ :str = self.transformer.transformer.wte(__A )
lowerCAmelCase_ :Any = self.encode_prefix(__A )
lowerCAmelCase_ :Optional[Any] = self.decode_prefix(__A )
lowerCAmelCase_ :Optional[int] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCAmelCase_ :int = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCAmelCase_ :Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCAmelCase_ :Tuple = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCAmelCase ( self , __A , __A ) -> torch.Tensor:
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
return self.encode_prefix(__A )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Tuple = torch.split(__A , 1 , dim=0 )
lowerCAmelCase_ :Optional[int] = []
lowerCAmelCase_ :List[str] = []
for feature in features:
lowerCAmelCase_ :Tuple = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCAmelCase_ :Tuple = torch.stack(__A )
lowerCAmelCase_ :int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCAmelCase ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = eos_token_id
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :Any = None
lowerCAmelCase_ :int = torch.ones(__A , device=__A , dtype=torch.int )
lowerCAmelCase_ :Optional[int] = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCAmelCase_ :List[str] = input_embeds
else:
lowerCAmelCase_ :Union[str, Any] = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCAmelCase_ :Optional[int] = self.transformer(inputs_embeds=__A )
lowerCAmelCase_ :str = outputs.logits
lowerCAmelCase_ :str = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCAmelCase_ :Dict = logits.softmax(-1 ).log()
if scores is None:
lowerCAmelCase_ , lowerCAmelCase_ :Any = logits.topk(__A , -1 )
lowerCAmelCase_ :Union[str, Any] = generated.expand(__A , *generated.shape[1:] )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCAmelCase_ :List[str] = next_tokens
else:
lowerCAmelCase_ :List[Any] = tokens.expand(__A , *tokens.shape[1:] )
lowerCAmelCase_ :Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCAmelCase_ :List[Any] = -float(np.inf )
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCAmelCase_ :List[Any] = scores_sum / seq_lengths[:, None]
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCAmelCase_ :Optional[Any] = next_tokens // scores_sum.shape[1]
lowerCAmelCase_ :Dict = seq_lengths[next_tokens_source]
lowerCAmelCase_ :Tuple = next_tokens % scores_sum.shape[1]
lowerCAmelCase_ :Optional[Any] = next_tokens.unsqueeze(1 )
lowerCAmelCase_ :str = tokens[next_tokens_source]
lowerCAmelCase_ :List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
lowerCAmelCase_ :Dict = generated[next_tokens_source]
lowerCAmelCase_ :Dict = scores_sum_average * seq_lengths
lowerCAmelCase_ :Tuple = is_stopped[next_tokens_source]
lowerCAmelCase_ :str = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCAmelCase_ :List[Any] = torch.cat((generated, next_token_embed) , dim=1 )
lowerCAmelCase_ :Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCAmelCase_ :str = scores / seq_lengths
lowerCAmelCase_ :Optional[int] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCAmelCase_ :Optional[Any] = [tokens[i] for i in order]
lowerCAmelCase_ :Dict = torch.stack(__A , dim=0 )
lowerCAmelCase_ :Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
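# Shape sketch for the beam search above (an assumption-laden illustration, not
# original code): a single prefix of embedding vectors is expanded to `beam_size`
# candidate token sequences, returned ordered by average log-probability. A
# hypothetical call (50256 is GPT-2's end-of-text id):
#
#   prefix = torch.randn(1, 10, 768)               # (batch, prefix_len, n_embd)
#   tokens, lengths = model.generate_beam(
#       input_embeds=prefix, device="cpu", eos_token_id=50256)
#   # tokens: (beam_size, entry_length) token ids; lengths: (beam_size,)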
| 1 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( lowercase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Dict = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowerCAmelCase_ :Tuple = 1_2_8
elif "12-12" in model_name:
lowerCAmelCase_ :Optional[Any] = 1_2
lowerCAmelCase_ :Dict = 1_2
elif "14-14" in model_name:
lowerCAmelCase_ :Any = 1_4
lowerCAmelCase_ :int = 1_4
elif "16-16" in model_name:
lowerCAmelCase_ :Optional[int] = 1_6
lowerCAmelCase_ :str = 1_6
else:
raise ValueError("""Model not supported""" )
lowerCAmelCase_ :Optional[Any] = """huggingface/label-files"""
if "speech-commands" in model_name:
lowerCAmelCase_ :Any = 3_5
lowerCAmelCase_ :str = """speech-commands-v2-id2label.json"""
else:
lowerCAmelCase_ :Any = 5_2_7
lowerCAmelCase_ :List[Any] = """audioset-id2label.json"""
lowerCAmelCase_ :Tuple = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ :int = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ :List[str] = idalabel
lowerCAmelCase_ :str = {v: k for k, v in idalabel.items()}
return config
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
if "module.v" in name:
lowerCAmelCase_ :Optional[int] = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
lowerCAmelCase_ :Tuple = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
lowerCAmelCase_ :Tuple = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowerCAmelCase_ :List[Any] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
lowerCAmelCase_ :List[str] = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowerCAmelCase_ :Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCAmelCase_ :str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase_ :str = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase_ :Any = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase_ :List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase_ :Tuple = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowerCAmelCase_ :Any = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
lowerCAmelCase_ :Tuple = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
lowerCAmelCase_ :Tuple = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def _snake_case ( lowercase__ : List[str] , lowercase__ : Dict ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ :Dict = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
lowerCAmelCase_ :Union[str, Any] = key.split(""".""" )
lowerCAmelCase_ :Union[str, Any] = int(key_split[3] )
lowerCAmelCase_ :Optional[int] = config.hidden_size
if "weight" in key:
lowerCAmelCase_ :Optional[Any] = val[:dim, :]
lowerCAmelCase_ :Optional[Any] = val[dim : dim * 2, :]
lowerCAmelCase_ :str = val[-dim:, :]
else:
lowerCAmelCase_ :str = val[:dim]
lowerCAmelCase_ :str = val[dim : dim * 2]
lowerCAmelCase_ :Tuple = val[-dim:]
else:
lowerCAmelCase_ :Any = val
return orig_state_dict
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
@torch.no_grad()
def _snake_case ( lowercase__ : int , lowercase__ : List[str] , lowercase__ : int=False ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = get_audio_spectrogram_transformer_config(lowercase__ )
lowerCAmelCase_ :str = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
lowerCAmelCase_ :List[Any] = model_name_to_url[model_name]
lowerCAmelCase_ :int = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
# remove some keys
remove_keys(lowercase__ )
# rename some keys
lowerCAmelCase_ :List[Any] = convert_state_dict(lowercase__ , lowercase__ )
# load 🤗 model
lowerCAmelCase_ :int = ASTForAudioClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
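    # The AudioSet- and Speech-Commands-finetuned checkpoints use different spectrogram
    # normalization stats and maximum input lengths.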
lowerCAmelCase_ :Dict = -4.2677393 if """speech-commands""" not in model_name else -6.845978
lowerCAmelCase_ :Union[str, Any] = 4.5689974 if """speech-commands""" not in model_name else 5.5654526
lowerCAmelCase_ :Optional[Any] = 1_0_2_4 if """speech-commands""" not in model_name else 1_2_8
lowerCAmelCase_ :str = ASTFeatureExtractor(mean=lowercase__ , std=lowercase__ , max_length=lowercase__ )
if "speech-commands" in model_name:
lowerCAmelCase_ :Any = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
lowerCAmelCase_ :List[str] = dataset[0]["""audio"""]["""array"""]
else:
lowerCAmelCase_ :Any = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = torchaudio.load(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = waveform.squeeze().numpy()
lowerCAmelCase_ :List[str] = feature_extractor(lowercase__ , sampling_rate=1_6_0_0_0 , return_tensors="""pt""" )
# forward pass
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowerCAmelCase_ :Dict = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowerCAmelCase_ :Optional[int] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowerCAmelCase_ :Optional[int] = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowerCAmelCase_ :Union[str, Any] = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowerCAmelCase_ :List[Any] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowerCAmelCase_ :List[str] = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowerCAmelCase_ :int = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowerCAmelCase_ :Any = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowercase__ , atol=1E-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase__ )
print(f"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(lowercase__ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"""MIT/{model_name}""" )
feature_extractor.push_to_hub(f"""MIT/{model_name}""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
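    # Map the generic config attribute names used across the library onto DETR's own parameter names.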
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
| 1 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
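# Summing all parameter values acts as a cheap checksum, used below to verify the converted weights.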
def _snake_case ( lowercase__ : Tuple ) -> int:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def _snake_case ( lowercase__ : Tuple , lowercase__ : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Any = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowerCAmelCase_ :Optional[int] = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
lowerCAmelCase_ :str = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
lowerCAmelCase_ :List[Any] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
lowerCAmelCase_ :List[str] = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
lowerCAmelCase_ :str = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
lowerCAmelCase_ :List[str] = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
lowerCAmelCase_ :Tuple = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
lowerCAmelCase_ :Tuple = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
lowerCAmelCase_ :Any = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
lowerCAmelCase_ :Tuple = key.replace("""image_encoder.module""" , """flava.image_model""" )
lowerCAmelCase_ :Dict = key.replace("""text_encoder.module""" , """flava.text_model""" )
lowerCAmelCase_ :int = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
lowerCAmelCase_ :Tuple = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
lowerCAmelCase_ :Optional[Any] = key.replace("""text_projection""" , """flava.text_projection""" )
lowerCAmelCase_ :Any = key.replace("""image_projection""" , """flava.image_projection""" )
lowerCAmelCase_ :int = value.float()
for key, value in codebook_state_dict.items():
lowerCAmelCase_ :Dict = value
return upgrade
@torch.no_grad()
def _snake_case ( lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : int=None ) -> Tuple:
'''simple docstring'''
if config_path is not None:
lowerCAmelCase_ :Union[str, Any] = FlavaConfig.from_pretrained(lowercase__ )
else:
lowerCAmelCase_ :Union[str, Any] = FlavaConfig()
lowerCAmelCase_ :str = FlavaForPreTraining(lowercase__ ).eval()
lowerCAmelCase_ :Union[str, Any] = convert_dalle_checkpoint(lowercase__ , lowercase__ , save_checkpoint=lowercase__ )
if os.path.exists(lowercase__ ):
lowerCAmelCase_ :str = torch.load(lowercase__ , map_location="""cpu""" )
else:
lowerCAmelCase_ :List[str] = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
lowerCAmelCase_ :Dict = upgrade_state_dict(lowercase__ , lowercase__ )
hf_model.load_state_dict(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = hf_model.state_dict()
lowerCAmelCase_ :Any = count_parameters(lowercase__ )
lowerCAmelCase_ :str = count_parameters(lowercase__ ) + count_parameters(lowercase__ )
assert torch.allclose(lowercase__ , lowercase__ , atol=1E-3 )
hf_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__UpperCAmelCase = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
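# Only register the feature extractor / model classes whose optional backends (vision, torch, TF) are installed.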
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['DeiTFeatureExtractor']
__UpperCAmelCase = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , *__A , **__A ) -> None:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , __A , )
super().__init__(*__A , **__A )
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=False , __A=True , __A="None" , __A=3 , __A=4 , __A=None , ) -> List[str]:
lowerCAmelCase_ :List[str] = parent
lowerCAmelCase_ :str = batch_size
lowerCAmelCase_ :int = seq_length
lowerCAmelCase_ :str = is_training
lowerCAmelCase_ :List[Any] = use_input_mask
lowerCAmelCase_ :Union[str, Any] = use_token_type_ids
lowerCAmelCase_ :int = use_labels
lowerCAmelCase_ :List[Any] = vocab_size
lowerCAmelCase_ :Tuple = hidden_size
lowerCAmelCase_ :Union[str, Any] = num_hidden_layers
lowerCAmelCase_ :Any = num_attention_heads
lowerCAmelCase_ :Tuple = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Any = hidden_dropout_prob
lowerCAmelCase_ :List[Any] = attention_probs_dropout_prob
lowerCAmelCase_ :List[str] = max_position_embeddings
lowerCAmelCase_ :Optional[int] = type_vocab_size
lowerCAmelCase_ :Optional[int] = type_sequence_label_size
lowerCAmelCase_ :Tuple = initializer_range
lowerCAmelCase_ :List[Any] = num_labels
lowerCAmelCase_ :Tuple = num_choices
lowerCAmelCase_ :Optional[Any] = relative_attention
lowerCAmelCase_ :str = position_biased_input
lowerCAmelCase_ :List[Any] = pos_att_type
lowerCAmelCase_ :Any = scope
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :int = None
if self.use_input_mask:
lowerCAmelCase_ :Any = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :List[str] = None
if self.use_token_type_ids:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ :Optional[int] = None
lowerCAmelCase_ :str = None
lowerCAmelCase_ :str = None
if self.use_labels:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :Optional[int] = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=__A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :Any = TFDebertaVaModel(config=__A )
lowerCAmelCase_ :Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
lowerCAmelCase_ :int = [input_ids, input_mask]
lowerCAmelCase_ :Union[str, Any] = model(__A )
lowerCAmelCase_ :Optional[int] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
lowerCAmelCase_ :int = TFDebertaVaForMaskedLM(config=__A )
lowerCAmelCase_ :Any = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase_ :Any = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = self.num_labels
lowerCAmelCase_ :int = TFDebertaVaForSequenceClassification(config=__A )
lowerCAmelCase_ :List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase_ :List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = self.num_labels
lowerCAmelCase_ :Dict = TFDebertaVaForTokenClassification(config=__A )
lowerCAmelCase_ :List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase_ :List[Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
lowerCAmelCase_ :Tuple = TFDebertaVaForQuestionAnswering(config=__A )
lowerCAmelCase_ :List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
lowerCAmelCase_ :List[str] = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = config_and_inputs
lowerCAmelCase_ :Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[int] = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ :Tuple = (
{
"feature-extraction": TFDebertaVaModel,
"fill-mask": TFDebertaVaForMaskedLM,
"question-answering": TFDebertaVaForQuestionAnswering,
"text-classification": TFDebertaVaForSequenceClassification,
"token-classification": TFDebertaVaForTokenClassification,
"zero-shot": TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ :List[Any] = False
UpperCAmelCase_ :int = False
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Any = TFDebertaVaModelTester(self )
lowerCAmelCase_ :int = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
self.assertIsNotNone(__A )
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __lowerCAmelCase ( self ) -> Any:
pass
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = TFDebertaVaModel.from_pretrained("""kamalkraj/deberta-v2-xlarge""" )
lowerCAmelCase_ :Optional[Any] = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
lowerCAmelCase_ :Optional[int] = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase_ :Dict = model(__A , attention_mask=__A )[0]
lowerCAmelCase_ :Dict = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , __A , atol=1E-4 )
| 1 |
"""simple docstring"""
__UpperCAmelCase = 2_56
# Modulus to hash a string
__UpperCAmelCase = 1_00_00_03
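# Rabin-Karp substring search: slide a window over the text and compare rolling hashes;
# only on a hash match is a direct string comparison done, to rule out collisions.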
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ :Tuple = len(lowercase__ )
lowerCAmelCase_ :List[str] = len(lowercase__ )
if p_len > t_len:
return False
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Any = 1
# Calculate the hash of the pattern and of the first window of the text
for i in range(lowercase__ ):
lowerCAmelCase_ :int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
lowerCAmelCase_ :Any = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
lowerCAmelCase_ :Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash): drop the leading character and append the next one
lowerCAmelCase_ :Any = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ :int = """abc1abc12"""
lowerCAmelCase_ :Dict = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
lowerCAmelCase_ :int = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowercase__ , lowercase__ ) and not rabin_karp(lowercase__ , lowercase__ )
# Test 2)
lowerCAmelCase_ :Dict = """ABABX"""
lowerCAmelCase_ :int = """ABABZABABYABABX"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 3)
lowerCAmelCase_ :Union[str, Any] = """AAAB"""
lowerCAmelCase_ :List[str] = """ABAAAAAB"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 4)
lowerCAmelCase_ :Dict = """abcdabcy"""
lowerCAmelCase_ :Union[str, Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowercase__ , lowercase__ )
# Test 5)
lowerCAmelCase_ :Optional[int] = """Lü"""
lowerCAmelCase_ :Optional[int] = """Lüsai"""
assert rabin_karp(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[int] = """Lue"""
assert not rabin_karp(lowercase__ , lowercase__ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 1 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
__UpperCAmelCase = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = torch.load(lowercase__ , map_location="""cpu""" )
return sd
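# Rename the original VisualBERT keys to the HF naming scheme; detector weights are dropped entirely.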
def _snake_case ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : List[str]=rename_keys_prefix ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = OrderedDict()
lowerCAmelCase_ :str = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowerCAmelCase_ :List[str] = key
for name_pair in rename_keys_prefix:
lowerCAmelCase_ :Optional[Any] = new_key.replace(name_pair[0] , name_pair[1] )
lowerCAmelCase_ :int = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it was added separately
lowerCAmelCase_ :str = new_d["""cls.predictions.bias"""]
return new_d
@torch.no_grad()
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Dict ) -> int:
'''simple docstring'''
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
# Get Config
if "pre" in checkpoint_path:
lowerCAmelCase_ :Optional[Any] = """pretraining"""
if "vcr" in checkpoint_path:
lowerCAmelCase_ :List[Any] = {"""visual_embedding_dim""": 5_1_2}
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase_ :Optional[Any] = {"""visual_embedding_dim""": 2_0_4_8}
elif "vqa" in checkpoint_path:
lowerCAmelCase_ :int = {"""visual_embedding_dim""": 2_0_4_8}
elif "nlvr" in checkpoint_path:
lowerCAmelCase_ :List[str] = {"""visual_embedding_dim""": 1_0_2_4}
else:
raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
else:
if "vcr" in checkpoint_path:
lowerCAmelCase_ :Tuple = {"""visual_embedding_dim""": 5_1_2}
lowerCAmelCase_ :List[Any] = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
lowerCAmelCase_ :Optional[int] = {"""visual_embedding_dim""": 2_0_4_8}
lowerCAmelCase_ :Tuple = """vqa_advanced"""
elif "vqa" in checkpoint_path:
lowerCAmelCase_ :List[Any] = {"""visual_embedding_dim""": 2_0_4_8, """num_labels""": 3_1_2_9}
lowerCAmelCase_ :Dict = """vqa"""
elif "nlvr" in checkpoint_path:
lowerCAmelCase_ :Optional[Any] = {
"""visual_embedding_dim""": 1_0_2_4,
"""num_labels""": 2,
}
lowerCAmelCase_ :Any = """nlvr"""
lowerCAmelCase_ :List[str] = VisualBertConfig(**lowercase__ )
# Load State Dict
lowerCAmelCase_ :Optional[int] = load_state_dict(lowercase__ )
lowerCAmelCase_ :int = get_new_dict(lowercase__ , lowercase__ )
if model_type == "pretraining":
lowerCAmelCase_ :List[Any] = VisualBertForPreTraining(lowercase__ )
elif model_type == "vqa":
lowerCAmelCase_ :Any = VisualBertForQuestionAnswering(lowercase__ )
elif model_type == "nlvr":
lowerCAmelCase_ :Any = VisualBertForVisualReasoning(lowercase__ )
elif model_type == "multichoice":
lowerCAmelCase_ :Optional[int] = VisualBertForMultipleChoice(lowercase__ )
model.load_state_dict(lowercase__ )
# Save Checkpoints
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
__UpperCAmelCase = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working, simple example of how to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : int ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCAmelCase_ :Optional[Any] = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :str = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCAmelCase_ :int = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCAmelCase_ :List[Any] = 1_6
elif accelerator.mixed_precision != "no":
lowerCAmelCase_ :List[str] = 8
else:
lowerCAmelCase_ :Optional[int] = None
return tokenizer.pad(
lowercase__ , padding="""longest""" , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :List[Any] = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__UpperCAmelCase = mocked_dataloaders # noqa: F811
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase__ ) == "1":
lowerCAmelCase_ :Optional[Any] = 2
# New Code #
lowerCAmelCase_ :List[str] = int(args.gradient_accumulation_steps )
lowerCAmelCase_ :int = int(args.local_sgd_steps )
# Initialize accelerator
lowerCAmelCase_ :str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :int = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = get_dataloaders(lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :Optional[int] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase_ :Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase_ :Optional[Any] = AdamW(params=model.parameters() , lr=lowercase__ )
# Instantiate scheduler
lowerCAmelCase_ :Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=1_0_0 , num_training_steps=(len(lowercase__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Now we train the model
for epoch in range(lowercase__ ):
model.train()
with LocalSGD(
accelerator=lowercase__ , model=lowercase__ , local_sgd_steps=lowercase__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase__ ):
lowerCAmelCase_ :str = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = output.loss
accelerator.backward(lowercase__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase__ , default=lowercase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase__ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase__ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCAmelCase_ :Optional[Any] = parser.parse_args()
lowerCAmelCase_ :Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
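# Builds transformer-style sinusoidal timestep embeddings: geometrically spaced frequencies,
# with sine terms in one half of the channels and cosine terms in the other (optionally swapped).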
def _snake_case ( lowercase__ : jnp.ndarray , lowercase__ : int , lowercase__ : float = 1 , lowercase__ : float = 1 , lowercase__ : float = 1.0E4 , lowercase__ : bool = False , lowercase__ : float = 1.0 , ) -> jnp.ndarray:
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
lowerCAmelCase_ :int = float(embedding_dim // 2 )
lowerCAmelCase_ :Dict = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCAmelCase_ :Optional[Any] = min_timescale * jnp.exp(jnp.arange(lowercase__ , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCAmelCase_ :Dict = jnp.expand_dims(lowercase__ , 1 ) * jnp.expand_dims(lowercase__ , 0 )
# scale embeddings
lowerCAmelCase_ :List[str] = scale * emb
if flip_sin_to_cos:
lowerCAmelCase_ :Optional[Any] = jnp.concatenate([jnp.cos(lowercase__ ), jnp.sin(lowercase__ )] , axis=1 )
else:
lowerCAmelCase_ :List[Any] = jnp.concatenate([jnp.sin(lowercase__ ), jnp.cos(lowercase__ )] , axis=1 )
lowerCAmelCase_ :Optional[Any] = jnp.reshape(lowercase__ , [jnp.shape(lowercase__ )[0], embedding_dim] )
return signal
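# A two-layer MLP (Dense -> SiLU -> Dense) that projects the sinusoidal signal into the model's time-embedding space.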
class _SCREAMING_SNAKE_CASE ( nn.Module ):
UpperCAmelCase_ :int = 32
UpperCAmelCase_ :jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , __A ) -> int:
lowerCAmelCase_ :Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""" )(__A )
lowerCAmelCase_ :Optional[Any] = nn.silu(__A )
lowerCAmelCase_ :Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""" )(__A )
return temb
class _SCREAMING_SNAKE_CASE ( nn.Module ):
UpperCAmelCase_ :int = 32
UpperCAmelCase_ :bool = False
UpperCAmelCase_ :float = 1
@nn.compact
def __call__( self , __A ) -> Tuple:
return get_sinusoidal_embeddings(
__A , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 1 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__UpperCAmelCase = 16
__UpperCAmelCase = 32
def _snake_case ( lowercase__ : Accelerator , lowercase__ : int = 1_6 , lowercase__ : str = "bert-base-cased" ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = AutoTokenizer.from_pretrained(lowercase__ )
lowerCAmelCase_ :Optional[Any] = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase__ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ :str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase__ , max_length=lowercase__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ :str = datasets.map(
lowercase__ , batched=lowercase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowercase__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ :List[str] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase__ : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowercase__ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowercase__ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
lowerCAmelCase_ :Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
lowerCAmelCase_ :Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ )
return train_dataloader, eval_dataloader
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
model.eval()
lowerCAmelCase_ :Dict = 0
for step, batch in enumerate(lowercase__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Optional[int] = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowercase__ ) - 1:
lowerCAmelCase_ :Optional[Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase_ :Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowercase__ , references=lowercase__ , )
lowerCAmelCase_ :Tuple = metric.compute()
return eval_metric["accuracy"]
def _snake_case ( lowercase__ : str , lowercase__ : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ :int = config["""lr"""]
lowerCAmelCase_ :Union[str, Any] = int(config["""num_epochs"""] )
lowerCAmelCase_ :Optional[int] = int(config["""seed"""] )
lowerCAmelCase_ :Union[str, Any] = int(config["""batch_size"""] )
lowerCAmelCase_ :Optional[Any] = args.model_name_or_path
set_seed(lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = get_dataloaders(lowercase__ , lowercase__ , lowercase__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ :str = AutoModelForSequenceClassification.from_pretrained(lowercase__ , return_dict=lowercase__ )
# Instantiate optimizer
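    # If the DeepSpeed config already defines an optimizer, hand Accelerate a DummyOptim
    # placeholder so the real optimizer is built by DeepSpeed itself.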
lowerCAmelCase_ :List[str] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ :str = optimizer_cls(params=model.parameters() , lr=lowercase__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ :Union[str, Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :str = (len(lowercase__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
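    # Same pattern as the optimizer: defer to the scheduler defined in the DeepSpeed config via DummyScheduler.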
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ :List[str] = get_linear_schedule_with_warmup(
optimizer=lowercase__ , num_warmup_steps=0 , num_training_steps=lowercase__ , )
else:
lowerCAmelCase_ :int = DummyScheduler(lowercase__ , total_num_steps=lowercase__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ :List[str] = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ :List[Any] = 0
lowerCAmelCase_ :str = evaluate.load("""glue""" , """mrpc""" )
lowerCAmelCase_ :Optional[Any] = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase_ :Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase_ :Optional[Any] = args.resume_from_checkpoint.split("""epoch_""" )[1]
lowerCAmelCase_ :int = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase_ :Union[str, Any] = int(lowercase__ ) + 1
lowerCAmelCase_ :Optional[int] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.print("""resumed checkpoint performance:""" , lowercase__ )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , f"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
lowerCAmelCase_ :List[str] = json.load(lowercase__ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase_ :List[Any] = {}
for epoch in range(lowercase__ , lowercase__ ):
model.train()
for step, batch in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
lowerCAmelCase_ :Dict = outputs.loss
lowerCAmelCase_ :int = loss / gradient_accumulation_steps
accelerator.backward(lowercase__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase_ :List[str] = f"""epoch_{epoch}"""
lowerCAmelCase_ :Any = os.path.join(args.output_dir , lowercase__ )
accelerator.save_state(lowercase__ )
lowerCAmelCase_ :List[Any] = evaluation_loop(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = accuracy
lowerCAmelCase_ :Any = lr_scheduler.get_lr()[0]
lowerCAmelCase_ :str = optimizer.param_groups[0]["""lr"""]
lowerCAmelCase_ :List[Any] = epoch
lowerCAmelCase_ :Tuple = overall_step
accelerator.print(f"""epoch {epoch}:""" , lowercase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def _snake_case ( ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowercase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowercase__ , )
parser.add_argument(
"""--output_dir""" , type=lowercase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowercase__ , default=lowercase__ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowercase__ , default=lowercase__ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowercase__ , default=2 , help="""Number of train epochs.""" , )
lowerCAmelCase_ :Optional[int] = parser.parse_args()
lowerCAmelCase_ :List[Any] = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=3 , __A=32 , __A=3 , __A=10 , __A=[10, 20, 30, 40] , __A=[1, 1, 2, 1] , __A=True , __A=True , __A="relu" , __A=3 , __A=None , ) -> Any:
lowerCAmelCase_ :Optional[Any] = parent
lowerCAmelCase_ :List[Any] = batch_size
lowerCAmelCase_ :Optional[int] = image_size
lowerCAmelCase_ :Dict = num_channels
lowerCAmelCase_ :Optional[Any] = embeddings_size
lowerCAmelCase_ :Union[str, Any] = hidden_sizes
lowerCAmelCase_ :List[Any] = depths
lowerCAmelCase_ :Any = is_training
lowerCAmelCase_ :Any = use_labels
lowerCAmelCase_ :Optional[int] = hidden_act
lowerCAmelCase_ :Optional[int] = num_labels
lowerCAmelCase_ :int = scope
lowerCAmelCase_ :Any = len(__A )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ :Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ :Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Any:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = TFResNetModel(config=__A )
lowerCAmelCase_ :Optional[int] = model(__A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.num_labels
lowerCAmelCase_ :Optional[int] = TFResNetForImageClassification(__A )
lowerCAmelCase_ :int = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = config_and_inputs
lowerCAmelCase_ :Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
UpperCAmelCase_ :List[str] = (
{"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase_ :List[Any] = False
UpperCAmelCase_ :str = False
UpperCAmelCase_ :Any = False
UpperCAmelCase_ :Optional[int] = False
UpperCAmelCase_ :Optional[Any] = False
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = TFResNetModelTester(self )
lowerCAmelCase_ :Union[str, Any] = ConfigTester(self , config_class=__A , has_text_modality=__A )
def __lowerCAmelCase ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCAmelCase ( self ) -> Optional[int]:
return
@unittest.skip(reason="""ResNet does not use inputs_embeds""" )
def __lowerCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="""ResNet does not support input and output embeddings""" )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :Union[str, Any] = model_class(__A )
lowerCAmelCase_ :Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ :Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase_ :Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Dict:
def check_hidden_states_output(__A , __A , __A ):
lowerCAmelCase_ :Any = model_class(__A )
lowerCAmelCase_ :List[str] = model(**self._prepare_for_class(__A , __A ) )
lowerCAmelCase_ :Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ :Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ :Any = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase_ :List[Any] = layer_type
lowerCAmelCase_ :Dict = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ :int = True
check_hidden_states_output(__A , __A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def __lowerCAmelCase ( self ) -> Dict:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :List[Any] = TFResNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _snake_case ( ) -> str:
    '''Load the standard COCO cats test fixture image used by the vision integration tests.'''
lowerCAmelCase_ :Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> List[Any]:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[str] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase_ :Union[str, Any] = self.default_image_processor
lowerCAmelCase_ :Any = prepare_img()
lowerCAmelCase_ :Optional[Any] = image_processor(images=__A , return_tensors="""tf""" )
# forward pass
lowerCAmelCase_ :Union[str, Any] = model(**__A )
# verify the logits
lowerCAmelCase_ :Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
lowerCAmelCase_ :List[Any] = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __A , atol=1E-4 ) )
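# The checks above can be exercised in isolation from a transformers checkout;
# the test file path below is an assumption about the repository layout:
#
#   python -m pytest tests/models/resnet/test_modeling_tf_resnet.py -q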
| 1 |
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Union[str, Any]:
if isinstance(__A , __A ):
            # Don't modify the user's data should they want to reuse it (e.g. in tests), because once we
            # modify it, it will not be accepted here again, since `auto` values would have been overridden
lowerCAmelCase_ :Tuple = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
lowerCAmelCase_ :str = json.load(__A )
else:
try:
lowerCAmelCase_ :Dict = baseaa.urlsafe_baadecode(__A ).decode("""utf-8""" )
lowerCAmelCase_ :int = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
lowerCAmelCase_ :Optional[Any] = config
self.set_stage_and_offload()
def __lowerCAmelCase ( self ) -> Tuple:
        # zero stage - this is done as early as possible, before the model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
lowerCAmelCase_ :Tuple = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowerCAmelCase_ :Dict = False
if self.is_zeroa() or self.is_zeroa():
lowerCAmelCase_ :Optional[int] = set(["""cpu""", """nvme"""] )
lowerCAmelCase_ :Union[str, Any] = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowerCAmelCase_ :Optional[int] = True
def __lowerCAmelCase ( self , __A ) -> Optional[Any]:
lowerCAmelCase_ :str = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Tuple = ds_key_long.split(""".""" )
lowerCAmelCase_ :List[str] = nodes.pop()
for node in nodes:
lowerCAmelCase_ :Tuple = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def __lowerCAmelCase ( self , __A , __A=None ) -> Optional[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
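    # Worked example of the dotted-key lookup implemented above: with a config of
    # {"zero_optimization": {"stage": 3}}, the key "zero_optimization.stage"
    # resolves to 3, while a key whose parent node is missing falls back to `default`.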
def __lowerCAmelCase ( self , __A , __A=False ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.config
# find the config node of interest if it exists
lowerCAmelCase_ :Union[str, Any] = ds_key_long.split(""".""" )
for node in nodes:
lowerCAmelCase_ :int = config
lowerCAmelCase_ :Any = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def __lowerCAmelCase ( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[int] = self.get_value(__A )
return False if value is None else bool(__A )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[str] = self.get_value(__A )
return False if value is None else not bool(__A )
def __lowerCAmelCase ( self ) -> str:
return self._stage == 2
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._stage == 3
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return self._offload
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = engine
def __lowerCAmelCase ( self , __A , **__A ) -> str:
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# This plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases, thus enabling a
# simple training loop that works transparently under many training regimes.
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> List[str]:
super().__init__(__A , device_placement=__A , scaler=__A )
lowerCAmelCase_ :List[str] = hasattr(self.optimizer , """overflow""" )
def __lowerCAmelCase ( self , __A=None ) -> Optional[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def __lowerCAmelCase ( self ) -> List[Any]:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def __lowerCAmelCase ( self ) -> int:
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Optional[int]:
super().__init__(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=0.0_0_1 , __A=0 , **__A ) -> List[Any]:
lowerCAmelCase_ :str = params
lowerCAmelCase_ :Any = lr
lowerCAmelCase_ :List[Any] = weight_decay
lowerCAmelCase_ :Any = kwargs
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=None , __A=0 , **__A ) -> List[str]:
lowerCAmelCase_ :Optional[int] = optimizer
lowerCAmelCase_ :int = total_num_steps
lowerCAmelCase_ :List[Any] = warmup_num_steps
lowerCAmelCase_ :int = kwargs
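# Minimal sketch of feeding the config wrapper defined at the top of this module
# (`DeepSpeedConfig` is a hypothetical alias for that class, whose name is mangled
# above); per its __init__ it accepts a dict, a path to a JSON file, or base64 JSON:
#
#   ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
#   wrapper = DeepSpeedConfig(ds_config)
#   wrapper.get_value("zero_optimization.stage")  # -> 3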
| 1 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __lowerCAmelCase ( self ) -> str:
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
lowerCAmelCase_ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :str = self.dummy_uncond_unet
lowerCAmelCase_ :str = DDIMScheduler()
lowerCAmelCase_ :Optional[int] = self.dummy_vq_model
lowerCAmelCase_ :Tuple = LDMPipeline(unet=__A , vqvae=__A , scheduler=__A )
ldm.to(__A )
ldm.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[int] = torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = ldm(generator=__A , num_inference_steps=2 , output_type="""numpy""" ).images
lowerCAmelCase_ :Optional[int] = torch.manual_seed(0 )
lowerCAmelCase_ :Tuple = ldm(generator=__A , num_inference_steps=2 , output_type="""numpy""" , return_dict=__A )[0]
lowerCAmelCase_ :int = image[0, -3:, -3:, -1]
lowerCAmelCase_ :int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ :Dict = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
lowerCAmelCase_ :Any = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :str = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__A )
ldm.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[int] = torch.manual_seed(0 )
lowerCAmelCase_ :Any = ldm(generator=__A , num_inference_steps=5 , output_type="""numpy""" ).images
lowerCAmelCase_ :Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ :Dict = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
lowerCAmelCase_ :Optional[int] = 1E-2 if torch_device != """mps""" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
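# Standalone sampling sketch using the same public pipeline as the slow test
# above (model id taken from that test; the step count is illustrative):
#
#   from diffusers import LDMPipeline
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(num_inference_steps=50).images[0]  # default output_type is PIL
#   image.save("ldm_sample.png")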
| 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :str = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
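# Hedged usage sketch for the tool above, instantiating the class directly (its
# name is mangled here; in transformers it is ImageSegmentationTool, and the
# local image path is hypothetical):
#
#   from PIL import Image
#   tool = ImageSegmentationTool()  # downloads CIDAS/clipseg-rd64-refined
#   mask = tool(image=Image.open("cat.png"), label="cat")  # returns a PIL mask image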
| 1 | 1 |
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> Tuple:
super().__init__()
lowerCAmelCase_ :int = nn.ModuleList(__A )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ):
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = controlnet(
__A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , )
# merge samples
if i == 0:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = down_samples, mid_sample
else:
lowerCAmelCase_ :str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__A , __A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Dict:
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , )
idx += 1
lowerCAmelCase_ :Optional[Any] = model_path_to_save + f"""_{idx}"""
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> int:
lowerCAmelCase_ :Optional[int] = 0
lowerCAmelCase_ :Union[str, Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowerCAmelCase_ :Union[str, Any] = pretrained_model_path
while os.path.isdir(__A ):
lowerCAmelCase_ :Optional[int] = ControlNetModel.from_pretrained(__A , **__A )
controlnets.append(__A )
idx += 1
lowerCAmelCase_ :int = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" )
if len(__A ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(__A )
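# Assembly sketch for the container above (in diffusers this class is
# MultiControlNetModel, importable from diffusers.pipelines.controlnet in recent
# versions; the checkpoint paths are hypothetical):
#
#   from diffusers import ControlNetModel
#   nets = [ControlNetModel.from_pretrained(p) for p in ("ctrl-a", "ctrl-b")]
#   multi = MultiControlNetModel(nets)
#   multi.save_pretrained("./multi")  # writes ./multi, ./multi_1, ...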
| 1 |
"""simple docstring"""
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> int:
    '''Recursive 0/1 knapsack: return the best total value achievable from items[index:] without exceeding max_weight.'''
if index == number_of_items:
return 0
lowerCAmelCase_ :Any = 0
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Dict = knapsack(lowercase__ , lowercase__ , lowercase__ , lowercase__ , index + 1 )
if weights[index] <= max_weight:
lowerCAmelCase_ :str = values[index] + knapsack(
lowercase__ , lowercase__ , lowercase__ , max_weight - weights[index] , index + 1 )
return max(lowercase__ , lowercase__ )
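# Worked example, assuming the original (weights, values, number_of_items,
# max_weight, index) parameter order implied by the function body: with weights
# [10, 20, 30], values [60, 100, 120] and capacity 50, taking the second and
# third items (weight 20 + 30 = 50) is optimal, so the answer is 100 + 120 = 220.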
if __name__ == "__main__":
import doctest
doctest.testmod()
| 1 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=13 , __A=30 , __A=2 , __A=3 , __A=True , __A=True , __A=32 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=10 , __A=0.0_2 , __A=3 , __A=None , __A=2 , ) -> int:
lowerCAmelCase_ :List[Any] = parent
lowerCAmelCase_ :Any = batch_size
lowerCAmelCase_ :int = image_size
lowerCAmelCase_ :Tuple = patch_size
lowerCAmelCase_ :Union[str, Any] = num_channels
lowerCAmelCase_ :Union[str, Any] = is_training
lowerCAmelCase_ :Union[str, Any] = use_labels
lowerCAmelCase_ :Any = hidden_size
lowerCAmelCase_ :Optional[int] = num_hidden_layers
lowerCAmelCase_ :Union[str, Any] = num_attention_heads
lowerCAmelCase_ :Any = intermediate_size
lowerCAmelCase_ :Dict = hidden_act
lowerCAmelCase_ :Tuple = hidden_dropout_prob
lowerCAmelCase_ :Tuple = attention_probs_dropout_prob
lowerCAmelCase_ :Dict = type_sequence_label_size
lowerCAmelCase_ :Any = initializer_range
lowerCAmelCase_ :Tuple = scope
lowerCAmelCase_ :Union[str, Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCAmelCase_ :List[Any] = (image_size // patch_size) ** 2
lowerCAmelCase_ :List[str] = num_patches + 2
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ :Optional[int] = None
if self.use_labels:
lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Dict = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self ) -> Optional[Any]:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowerCAmelCase ( self , __A , __A , __A ) -> List[str]:
lowerCAmelCase_ :List[Any] = TFDeiTModel(config=__A )
lowerCAmelCase_ :str = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A ) -> Optional[Any]:
lowerCAmelCase_ :List[str] = TFDeiTForMaskedImageModeling(config=__A )
lowerCAmelCase_ :Optional[int] = model(__A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase_ :Any = 1
lowerCAmelCase_ :Union[str, Any] = TFDeiTForMaskedImageModeling(__A )
lowerCAmelCase_ :int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ :Optional[Any] = model(__A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowerCAmelCase ( self , __A , __A , __A ) -> str:
lowerCAmelCase_ :str = self.type_sequence_label_size
lowerCAmelCase_ :Optional[Any] = TFDeiTForImageClassification(__A )
lowerCAmelCase_ :Dict = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ :int = 1
lowerCAmelCase_ :List[Any] = TFDeiTForImageClassification(__A )
lowerCAmelCase_ :Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ :int = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = config_and_inputs
lowerCAmelCase_ :Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Dict = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ :int = (
{
"feature-extraction": TFDeiTModel,
"image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
UpperCAmelCase_ :Union[str, Any] = False
UpperCAmelCase_ :str = False
UpperCAmelCase_ :Union[str, Any] = False
UpperCAmelCase_ :List[str] = False
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[int] = TFDeiTModelTester(self )
lowerCAmelCase_ :Any = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def __lowerCAmelCase ( self ) -> List[Any]:
pass
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :List[str] = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCAmelCase_ :int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Dense ) )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :Optional[int] = model_class(__A )
lowerCAmelCase_ :str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ :Any = [*signature.parameters.keys()]
lowerCAmelCase_ :List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __lowerCAmelCase ( self , __A , __A , __A=False ) -> int:
lowerCAmelCase_ :List[str] = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def __lowerCAmelCase ( self ) -> List[str]:
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :List[Any] = TFDeiTModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _snake_case ( ) -> Optional[int]:
    '''Load the standard COCO cats test fixture image used by the vision integration tests.'''
lowerCAmelCase_ :List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self ) -> Tuple:
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Tuple = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
lowerCAmelCase_ :List[str] = self.default_image_processor
lowerCAmelCase_ :int = prepare_img()
lowerCAmelCase_ :Tuple = image_processor(images=__A , return_tensors="""tf""" )
# forward pass
lowerCAmelCase_ :Tuple = model(**__A )
# verify the logits
lowerCAmelCase_ :str = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
lowerCAmelCase_ :Any = tf.constant([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
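# Standalone inference sketch mirroring the integration test above (the model id
# and processor class come straight from that test):
#
#   from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher
#   processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs).logits  # shape (1, 1000)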
| 1 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _snake_case ( lowercase__ : bool = True , *lowercase__ : Optional[int] , **lowercase__ : str ) -> Optional[Any]:
    '''Wrapper around tqdm that, when main_process_only is True, draws the progress bar only on the local main process.'''
if not is_tqdm_available():
raise ImportError("""Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.""" )
lowerCAmelCase_ :Tuple = False
if main_process_only:
lowerCAmelCase_ :Dict = PartialState().local_process_index == 0
return _tqdm(*lowercase__ , **lowercase__ , disable=lowercase__ )
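# With the signature above (first positional argument = main_process_only), a
# distributed training loop using this wrapper (named `tqdm` in accelerate;
# `dataloader` is hypothetical) would read:
#
#   for batch in tqdm(True, dataloader):  # bar drawn only on the local main process
#       ...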
| 1 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def _snake_case ( lowercase__ : List[str] ) -> Dict:
    '''Map one GroupViT checkpoint parameter name to its Transformers equivalent.'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase_ :str = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase_ :Optional[int] = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowerCAmelCase_ :Optional[int] = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowerCAmelCase_ :List[str] = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase_ :List[Any] = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase_ :Optional[int] = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase_ :Tuple = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowerCAmelCase_ :int = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase_ :Optional[int] = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase_ :Any = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowerCAmelCase_ :Any = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowerCAmelCase_ :List[Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowerCAmelCase_ :Any = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowerCAmelCase_ :int = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowerCAmelCase_ :Optional[Any] = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowerCAmelCase_ :Union[str, Any] = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase_ :int = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowerCAmelCase_ :List[str] = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowerCAmelCase_ :Optional[int] = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowerCAmelCase_ :str = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
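# Example trace through the rules above: "img_encoder.layers.0.blocks.1.attn.qkv.weight"
# becomes "vision_model.encoder.stages.0.layers.1.self_attn.qkv.weight".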
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Dict ) -> int:
    '''Rename every key of the original state dict, splitting fused qkv / in_proj tensors into separate q, k, v pieces.'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ :List[str] = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase_ :str = key.split(""".""" )
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase_ :Tuple = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase_ :List[Any] = val[:dim, :]
lowerCAmelCase_ :Optional[int] = val[dim : dim * 2, :]
lowerCAmelCase_ :Union[str, Any] = val[-dim:, :]
else:
lowerCAmelCase_ :Any = val[:dim]
lowerCAmelCase_ :Tuple = val[dim : dim * 2]
lowerCAmelCase_ :Optional[int] = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase_ :Optional[Any] = key.split(""".""" )
lowerCAmelCase_ :List[Any] = int(key_split[3] )
lowerCAmelCase_ :List[Any] = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ :Dict = val[:dim, :]
lowerCAmelCase_ :Tuple = val[
dim : dim * 2, :
]
lowerCAmelCase_ :Union[str, Any] = val[-dim:, :]
else:
lowerCAmelCase_ :List[str] = val[:dim]
lowerCAmelCase_ :int = val[dim : dim * 2]
lowerCAmelCase_ :Any = val[-dim:]
else:
lowerCAmelCase_ :List[Any] = rename_key(lowercase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase_ :int = val.squeeze_()
else:
lowerCAmelCase_ :int = val
return orig_state_dict
def _snake_case ( ) -> Optional[Any]:
    '''Download the standard COCO cats image used to verify the converted model.'''
lowerCAmelCase_ :Dict = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ :Union[str, Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Optional[Any]="groupvit-gcc-yfcc" , lowercase__ : List[Any]=False ) -> str:
    '''Convert a GroupViT checkpoint to the Transformers design, verify its logits, and optionally push it to the hub.'''
lowerCAmelCase_ :List[str] = GroupViTConfig()
lowerCAmelCase_ :List[str] = GroupViTModel(lowercase__ ).eval()
lowerCAmelCase_ :int = torch.load(lowercase__ , map_location="""cpu""" )["""model"""]
lowerCAmelCase_ :Any = convert_state_dict(lowercase__ , lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = model.load_state_dict(lowercase__ , strict=lowercase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase__ ) == 0)
# verify result
lowerCAmelCase_ :Optional[int] = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowerCAmelCase_ :Optional[Any] = prepare_img()
lowerCAmelCase_ :Optional[Any] = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowercase__ , padding=lowercase__ , return_tensors="""pt""" )
with torch.no_grad():
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase_ :List[str] = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase_ :Union[str, Any] = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(f"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , lowercase__ , atol=1E-3 )
processor.save_pretrained(lowercase__ )
model.save_pretrained(lowercase__ )
print("""Successfully saved processor and model to""" , lowercase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowercase__ , organization="""nielsr""" )
model.push_to_hub(lowercase__ , organization="""nielsr""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
        default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__UpperCAmelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
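# Example conversion run (the script filename and checkpoint path are hypothetical;
# the flags are the ones registered with argparse above):
#
#   python convert_groupvit_checkpoint.py --checkpoint_path groupvit_gcc_yfcc.pth \
#       --model_name groupvit-gcc-yfcc --pytorch_dump_folder_path ./groupvit --push_to_hub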
| 1 |
"""simple docstring"""
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json')
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = 0
def __lowerCAmelCase ( self ) -> List[str]:
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(__A , __A )
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[int]:
try:
AutoConfig.register("""custom""" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""model""" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("""bert""" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCAmelCase_ :Union[str, Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def __lowerCAmelCase ( self ) -> Tuple:
with self.assertRaisesRegex(
__A , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" )
def __lowerCAmelCase ( self ) -> Any:
with self.assertRaisesRegex(
__A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" )
def __lowerCAmelCase ( self ) -> int:
with self.assertRaisesRegex(
__A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def __lowerCAmelCase ( self ) -> Tuple:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def __lowerCAmelCase ( self ) -> int:
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = "new-model"
try:
AutoConfig.register("""new-model""" , __A )
# If remote code is not set, the default is to use local
lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
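# The registration pattern exercised above, in isolation (CustomConfig stands in
# for any PretrainedConfig subclass with model_type = "custom"; the config
# directory path is hypothetical):
#
#   from transformers import AutoConfig
#   AutoConfig.register("custom", CustomConfig)
#   config = AutoConfig.from_pretrained(path_to_dir_with_custom_config)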
| 1 | 1 |