code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__: Tuple = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
lowercase__: List[str] = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' )
lowercase__: Any = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' )
lowercase__: Dict = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' )
lowercase__: Optional[Any] = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' )
lowercase__: Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' )
lowercase__: Union[str, Any] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' )
lowercase__: Tuple = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' )
lowercase__: Dict = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' )
lowercase__: str = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' )
lowercase__: Optional[Any] = key.replace('''image_encoder.module''' , '''flava.image_model''' )
lowercase__: Optional[Any] = key.replace('''text_encoder.module''' , '''flava.text_model''' )
lowercase__: Union[str, Any] = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' )
lowercase__: int = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' )
lowercase__: Union[str, Any] = key.replace('''text_projection''' , '''flava.text_projection''' )
lowercase__: Union[str, Any] = key.replace('''image_projection''' , '''flava.image_projection''' )
lowercase__: List[Any] = value.float()
for key, value in codebook_state_dict.items():
lowercase__: List[Any] = value
return upgrade
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Union[str, Any]:
if config_path is not None:
lowercase__: Any = FlavaConfig.from_pretrained(__UpperCAmelCase )
else:
lowercase__: Any = FlavaConfig()
lowercase__: str = FlavaForPreTraining(__UpperCAmelCase ).eval()
lowercase__: int = convert_dalle_checkpoint(__UpperCAmelCase , __UpperCAmelCase , save_checkpoint=__UpperCAmelCase )
if os.path.exists(__UpperCAmelCase ):
lowercase__: Optional[int] = torch.load(__UpperCAmelCase , map_location='''cpu''' )
else:
lowercase__: int = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='''cpu''' )
lowercase__: Tuple = upgrade_state_dict(__UpperCAmelCase , __UpperCAmelCase )
hf_model.load_state_dict(__UpperCAmelCase )
lowercase__: List[Any] = hf_model.state_dict()
lowercase__: List[str] = count_parameters(__UpperCAmelCase )
lowercase__: Optional[int] = count_parameters(__UpperCAmelCase ) + count_parameters(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 )
hf_model.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__A = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 352 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :str = field(
metadata={"help": "The output directory where the model will be written."} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} ,)
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
lowercase__: Dict = HfArgumentParser((ModelArguments,) )
((lowercase__), ): List[str] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
lowercase__: Tuple = True
lowercase__: int = True
lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
lowercase__: int = decoder_config.decoder_start_token_id
lowercase__: Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
lowercase__: Tuple = decoder_config.bos_token_id
if pad_token_id is None:
lowercase__: Optional[int] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
lowercase__: Optional[Any] = decoder_config.eos_token_id
lowercase__: Tuple = decoder_start_token_id
lowercase__: Dict = pad_token_id
lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 2 | 0 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
lowercase__: List[str] = data
lowercase__: int = [0X6745_2301, 0XEFCD_AB89, 0X98BA_DCFE, 0X1032_5476, 0XC3D2_E1F0]
@staticmethod
def _snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return ((n << b) | (n >> (32 - b))) & 0XFFFF_FFFF
def _snake_case ( self ):
lowercase__: List[Any] = b'''\x80''' + b'''\x00''' * (63 - (len(self.data ) + 8) % 64)
lowercase__: List[Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def _snake_case ( self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[Any] = list(struct.unpack('''>16L''' , _UpperCAmelCase ) ) + [0] * 64
for i in range(16 , 80 ):
lowercase__: List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def _snake_case ( self ):
lowercase__: int = self.padding()
lowercase__: List[Any] = self.split_blocks()
for block in self.blocks:
lowercase__: str = self.expand_block(_UpperCAmelCase )
lowercase__: int = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
lowercase__: int = (b & c) | ((~b) & d)
lowercase__: int = 0X5A82_7999
elif 20 <= i < 40:
lowercase__: int = b ^ c ^ d
lowercase__: List[str] = 0X6ED9_EBA1
elif 40 <= i < 60:
lowercase__: Tuple = (b & c) | (b & d) | (c & d)
lowercase__: Dict = 0X8F1B_BCDC
elif 60 <= i < 80:
lowercase__: Tuple = b ^ c ^ d
lowercase__: Optional[Any] = 0XCA62_C1D6
lowercase__: Any = (
self.rotate(_UpperCAmelCase , 5 ) + f + e + k + expanded_block[i] & 0XFFFF_FFFF,
a,
self.rotate(_UpperCAmelCase , 30 ),
c,
d,
)
lowercase__: Tuple = (
self.h[0] + a & 0XFFFF_FFFF,
self.h[1] + b & 0XFFFF_FFFF,
self.h[2] + c & 0XFFFF_FFFF,
self.h[3] + d & 0XFFFF_FFFF,
self.h[4] + e & 0XFFFF_FFFF,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
lowercase__: List[Any] = b'''Test String'''
assert SHAaHash(__UpperCAmelCase ).final_hash() == hashlib.shaa(__UpperCAmelCase ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
lowercase__: List[Any] = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
lowercase__: Dict = parser.parse_args()
lowercase__: List[Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
lowercase__: Union[str, Any] = f.read()
else:
lowercase__: int = bytes(__UpperCAmelCase , '''utf-8''' )
print(SHAaHash(__UpperCAmelCase ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 353 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "ctrl"
_UpperCAmelCase :int = ["past_key_values"]
_UpperCAmelCase :Dict = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=246534 , _UpperCAmelCase=256 , _UpperCAmelCase=1280 , _UpperCAmelCase=8192 , _UpperCAmelCase=48 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[int] = n_positions
lowercase__: Optional[int] = n_embd
lowercase__: Any = n_layer
lowercase__: Any = n_head
lowercase__: int = dff
lowercase__: Dict = resid_pdrop
lowercase__: Any = embd_pdrop
lowercase__: Any = layer_norm_epsilon
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = use_cache
super().__init__(**_UpperCAmelCase )
| 2 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple: # noqa: E741
lowercase__: Dict = len(__UpperCAmelCase )
lowercase__: int = 0
lowercase__: str = [0] * n
lowercase__: Tuple = [False] * n
lowercase__: List[Any] = [False] * n
def dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if parent == root:
out_edge_count += 1
lowercase__: Any = True
lowercase__: Optional[Any] = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
lowercase__: Optional[Any] = dfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowercase__: Optional[int] = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
lowercase__: List[str] = True
# AP found via cycle
if at == low[to]:
lowercase__: List[Any] = True
else:
lowercase__: Optional[int] = min(low[at] , __UpperCAmelCase )
return out_edge_count
for i in range(__UpperCAmelCase ):
if not visited[i]:
lowercase__: Any = 0
lowercase__: str = dfs(__UpperCAmelCase , __UpperCAmelCase , -1 , __UpperCAmelCase )
lowercase__: str = out_edge_count > 1
for x in range(len(__UpperCAmelCase ) ):
if is_art[x] is True:
print(__UpperCAmelCase )
# Adjacency list of graph
__A = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 354 | """simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_0 ) -> int:
lowercase__: str = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 2 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355 | """simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=0.2 , _UpperCAmelCase=0.2 ):
lowercase__: int = bp_numa
lowercase__: Union[str, Any] = bp_numa
lowercase__: List[str] = bp_numa
lowercase__: str = conva_get[:2]
lowercase__: Union[str, Any] = conva_get[2]
lowercase__: Any = size_pa
lowercase__: Optional[Any] = rate_w
lowercase__: Tuple = rate_t
lowercase__: List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowercase__: Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowercase__: Union[str, Any] = -2 * np.random.rand(self.conva[1] ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
lowercase__: Any = -2 * np.random.rand(self.num_bpa ) + 1
def _snake_case ( self , _UpperCAmelCase ):
# save model dict with pickle
lowercase__: int = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_UpperCAmelCase , '''wb''' ) as f:
pickle.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"""Model saved: {save_path}""" )
@classmethod
def _snake_case ( cls , _UpperCAmelCase ):
# read saved model
with open(_UpperCAmelCase , '''rb''' ) as f:
lowercase__: Optional[int] = pickle.load(_UpperCAmelCase ) # noqa: S301
lowercase__: Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
lowercase__: Any = model_dic.get('''size_pooling1''' )
lowercase__: int = model_dic.get('''num_bp1''' )
lowercase__: Optional[int] = model_dic.get('''num_bp2''' )
lowercase__: str = model_dic.get('''num_bp3''' )
lowercase__: Any = model_dic.get('''rate_weight''' )
lowercase__: Union[str, Any] = model_dic.get('''rate_thre''' )
# create model instance
lowercase__: str = CNN(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# modify model parameter
lowercase__: Dict = model_dic.get('''w_conv1''' )
lowercase__: Dict = model_dic.get('''wkj''' )
lowercase__: str = model_dic.get('''vji''' )
lowercase__: List[Any] = model_dic.get('''thre_conv1''' )
lowercase__: Optional[int] = model_dic.get('''thre_bp2''' )
lowercase__: Tuple = model_dic.get('''thre_bp3''' )
return conv_ins
def _snake_case ( self , _UpperCAmelCase ):
return 1 / (1 + np.exp(-1 * x ))
def _snake_case ( self , _UpperCAmelCase ):
return round(_UpperCAmelCase , 3 )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# convolution process
lowercase__: Any = convs[0]
lowercase__: Tuple = convs[1]
lowercase__: List[Any] = np.shape(_UpperCAmelCase )[0]
# get the data slice of original image data, data_focus
lowercase__: List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , _UpperCAmelCase ):
lowercase__: Tuple = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_UpperCAmelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
lowercase__: Optional[int] = []
lowercase__: Optional[int] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_UpperCAmelCase ):
lowercase__: str = []
for i_focus in range(len(_UpperCAmelCase ) ):
lowercase__: Any = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(
_UpperCAmelCase , _UpperCAmelCase )
data_featuremap.append(_UpperCAmelCase )
# expanding the data slice to One dimenssion
lowercase__: Union[str, Any] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_UpperCAmelCase ) )
lowercase__: Any = np.asarray(_UpperCAmelCase )
return focus_list, data_featuremap
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="average_pool" ):
# pooling process
lowercase__: List[Any] = len(featuremaps[0] )
lowercase__: Any = int(size_map / size_pooling )
lowercase__: List[Any] = []
for i_map in range(len(_UpperCAmelCase ) ):
lowercase__: Any = featuremaps[i_map]
lowercase__: Tuple = []
for i_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j_focus in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_UpperCAmelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_UpperCAmelCase ) )
lowercase__: str = np.asmatrix(_UpperCAmelCase ).reshape(_UpperCAmelCase , _UpperCAmelCase )
featuremap_pooled.append(_UpperCAmelCase )
return featuremap_pooled
def _snake_case ( self , _UpperCAmelCase ):
# expanding three dimension data to one dimension list
lowercase__: Optional[Any] = []
for i in range(len(_UpperCAmelCase ) ):
lowercase__: Any = np.shape(data[i] )
lowercase__: List[Any] = data[i].reshape(1 , shapes[0] * shapes[1] )
lowercase__: List[str] = data_listed.getA().tolist()[0]
data_expanded.extend(_UpperCAmelCase )
lowercase__: List[str] = np.asarray(_UpperCAmelCase )
return data_expanded
def _snake_case ( self , _UpperCAmelCase ):
# expanding matrix to one dimension list
lowercase__: Union[str, Any] = np.asarray(_UpperCAmelCase )
lowercase__: List[str] = np.shape(_UpperCAmelCase )
lowercase__: List[Any] = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = []
lowercase__: List[str] = 0
for i_map in range(_UpperCAmelCase ):
lowercase__: Union[str, Any] = np.ones((size_map, size_map) )
for i in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(0 , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[Any] = pd_pool[
i_pool
]
lowercase__: List[Any] = i_pool + 1
lowercase__: str = np.multiply(
_UpperCAmelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_UpperCAmelCase )
return pd_all
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=bool ):
# model traning
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_UpperCAmelCase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_UpperCAmelCase )) )
lowercase__: Tuple = 0
lowercase__: Tuple = []
lowercase__: Optional[int] = 10000
while rp < n_repeat and mse >= error_accuracy:
lowercase__: Tuple = 0
print(F"""-------------Learning Time {rp}--------------""" )
for p in range(len(_UpperCAmelCase ) ):
# print('------------Learning Image: %d--------------'%p)
lowercase__: List[Any] = np.asmatrix(datas_train[p] )
lowercase__: Optional[int] = np.asarray(datas_teach[p] )
lowercase__, lowercase__: List[str] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: Optional[int] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: int = np.shape(_UpperCAmelCase )
lowercase__: Optional[Any] = self._expand(_UpperCAmelCase )
lowercase__: Any = data_bp_input
lowercase__: Any = np.dot(_UpperCAmelCase , self.vji.T ) - self.thre_bpa
lowercase__: str = self.sig(_UpperCAmelCase )
lowercase__: Optional[Any] = np.dot(_UpperCAmelCase , self.wkj.T ) - self.thre_bpa
lowercase__: Dict = self.sig(_UpperCAmelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowercase__: str = np.multiply(
(data_teach - bp_outa) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: str = np.multiply(
np.dot(_UpperCAmelCase , self.wkj ) , np.multiply(_UpperCAmelCase , (1 - bp_outa) ) )
lowercase__: Dict = np.dot(_UpperCAmelCase , self.vji )
lowercase__: Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowercase__: List[str] = pd_conva_pooled.T.getA().tolist()
lowercase__: Optional[Any] = self._calculate_gradient_from_pool(
_UpperCAmelCase , _UpperCAmelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowercase__: str = self._expand_mat(pd_conva_all[k_conv] )
lowercase__: str = self.rate_weight * np.dot(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowercase__: List[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowercase__: Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowercase__: List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowercase__: List[str] = self.thre_bpa - pd_k_all * self.rate_thre
lowercase__: Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowercase__: Optional[Any] = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowercase__: str = rp + 1
lowercase__: Optional[Any] = error_count / patterns
all_mse.append(_UpperCAmelCase )
def draw_error():
lowercase__: Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_UpperCAmelCase , '''+-''' )
plt.plot(_UpperCAmelCase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_UpperCAmelCase , alpha=0.5 )
plt.show()
print('''------------------Training Complished---------------------''' )
print((''' - - Training epoch: ''', rp, F""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def _snake_case ( self , _UpperCAmelCase ):
# model predict
lowercase__: Union[str, Any] = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_UpperCAmelCase )) )
for p in range(len(_UpperCAmelCase ) ):
lowercase__: Union[str, Any] = np.asmatrix(datas_test[p] )
lowercase__, lowercase__: Any = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[str] = self.pooling(_UpperCAmelCase , self.size_poolinga )
lowercase__: str = self._expand(_UpperCAmelCase )
lowercase__: List[Any] = data_bp_input
lowercase__: List[str] = bp_outa * self.vji.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
lowercase__: Optional[int] = bp_outa * self.wkj.T - self.thre_bpa
lowercase__: Any = self.sig(_UpperCAmelCase )
produce_out.extend(bp_outa.getA().tolist() )
lowercase__: str = [list(map(self.do_round , _UpperCAmelCase ) ) for each in produce_out]
return np.asarray(_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
# return the data of image after convoluting process so we can check it out
lowercase__: int = np.asmatrix(_UpperCAmelCase )
lowercase__, lowercase__: Optional[int] = self.convolute(
_UpperCAmelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowercase__: List[Any] = self.pooling(_UpperCAmelCase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 2 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__A = logging.get_logger(__name__)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use ImageGPTImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 356 | """simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = CTRLTokenizer
_UpperCAmelCase :Any = False
_UpperCAmelCase :List[Any] = False
def _snake_case ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__: Dict = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
lowercase__: Any = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
lowercase__: Optional[int] = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
lowercase__: Optional[Any] = {'''unk_token''': '''<unk>'''}
lowercase__: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase__: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_UpperCAmelCase ) )
def _snake_case ( self , **_UpperCAmelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Optional[int] = '''adapt react readapt apt'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: List[str] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__: Optional[int] = '''adapt react readapt apt'''
lowercase__: Any = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
lowercase__: Optional[Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = tokens + [tokenizer.unk_token]
lowercase__: str = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
| 2 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = "lxmert"
_UpperCAmelCase :List[str] = {}
def __init__( self , _UpperCAmelCase=30522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=9500 , _UpperCAmelCase=1600 , _UpperCAmelCase=400 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=9 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=2048 , _UpperCAmelCase=4 , _UpperCAmelCase=6.67 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , **_UpperCAmelCase , ):
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Any = num_attention_heads
lowercase__: Tuple = hidden_act
lowercase__: Any = intermediate_size
lowercase__: Optional[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: int = max_position_embeddings
lowercase__: Tuple = type_vocab_size
lowercase__: Tuple = initializer_range
lowercase__: str = layer_norm_eps
lowercase__: Tuple = num_qa_labels
lowercase__: Dict = num_object_labels
lowercase__: Any = num_attr_labels
lowercase__: Any = l_layers
lowercase__: Union[str, Any] = x_layers
lowercase__: Optional[Any] = r_layers
lowercase__: List[Any] = visual_feat_dim
lowercase__: str = visual_pos_dim
lowercase__: Dict = visual_loss_normalizer
lowercase__: str = task_matched
lowercase__: Optional[int] = task_mask_lm
lowercase__: Tuple = task_obj_predict
lowercase__: Any = task_qa
lowercase__: int = visual_obj_loss
lowercase__: Tuple = visual_attr_loss
lowercase__: Tuple = visual_feat_loss
lowercase__: Any = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers}
super().__init__(**_UpperCAmelCase )
| 357 | """simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _UpperCAmelCase ):
lowercase__: int = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_UpperCAmelCase )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ):
lowercase__: List[str] = get_logger('''datasets-cli/converting''' )
lowercase__: Optional[Any] = tfds_path
lowercase__: Dict = datasets_directory
def _snake_case ( self ):
if os.path.isdir(self._tfds_path ):
lowercase__: Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__: Optional[int] = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase__: int = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
lowercase__: Tuple = []
lowercase__: Dict = []
lowercase__: Any = {}
if os.path.isdir(self._tfds_path ):
lowercase__: Dict = os.listdir(_UpperCAmelCase )
else:
lowercase__: Dict = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_UpperCAmelCase , encoding='''utf-8''' ) as f:
lowercase__: Tuple = f.readlines()
lowercase__: Optional[Any] = []
lowercase__: Dict = False
lowercase__: List[str] = False
lowercase__: List[Any] = []
for line in lines:
lowercase__: List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__: Optional[int] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__: Dict = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__: Tuple = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__: Any = True
lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' )
out_lines.append(_UpperCAmelCase )
out_lines.append(_UpperCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase__: List[str] = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__: Optional[Any] = True
out_lines.append(_UpperCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__: Dict = f_name.replace('''.py''' , '''''' )
lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_UpperCAmelCase )
if needs_manual_update:
with_manual_update.append(_UpperCAmelCase )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_UpperCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
lowercase__: str = os.path.basename(_UpperCAmelCase )
lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(_UpperCAmelCase , _UpperCAmelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
__A = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
__A = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
__A = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
__A = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary"
)
__A = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(6_4, 6_4), batch_size=3_2, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
__A = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(6_4, 6_4)
)
__A = tf.keras.preprocessing.image.img_to_array(test_image)
__A = np.expand_dims(test_image, axis=0)
__A = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
__A = "Normal"
if result[0][0] == 1:
__A = "Abnormality detected"
| 358 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "cvt"
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=[7, 3, 3] , _UpperCAmelCase=[4, 2, 2] , _UpperCAmelCase=[2, 1, 1] , _UpperCAmelCase=[64, 192, 384] , _UpperCAmelCase=[1, 3, 6] , _UpperCAmelCase=[1, 2, 10] , _UpperCAmelCase=[4.0, 4.0, 4.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.0] , _UpperCAmelCase=[0.0, 0.0, 0.1] , _UpperCAmelCase=[True, True, True] , _UpperCAmelCase=[False, False, True] , _UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , _UpperCAmelCase=[3, 3, 3] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[2, 2, 2] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=[1, 1, 1] , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Dict = num_channels
lowercase__: str = patch_sizes
lowercase__: Optional[Any] = patch_stride
lowercase__: List[str] = patch_padding
lowercase__: Optional[Any] = embed_dim
lowercase__: Optional[int] = num_heads
lowercase__: Any = depth
lowercase__: str = mlp_ratio
lowercase__: Any = attention_drop_rate
lowercase__: Any = drop_rate
lowercase__: Optional[Any] = drop_path_rate
lowercase__: Dict = qkv_bias
lowercase__: Dict = cls_token
lowercase__: Any = qkv_projection_method
lowercase__: List[str] = kernel_qkv
lowercase__: Union[str, Any] = padding_kv
lowercase__: Optional[int] = stride_kv
lowercase__: int = padding_q
lowercase__: Dict = stride_q
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = layer_norm_eps
| 2 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A = "http://www.mocksite.com/file1.txt"
__A = "\"text\": [\"foo\", \"foo\"]"
__A = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :List[Any] = 200
_UpperCAmelCase :str = {"Content-Length": "100"}
_UpperCAmelCase :Optional[Any] = {}
def _snake_case ( self , **_UpperCAmelCase ):
return [bytes(_UpperCAmelCase , '''utf-8''' )]
def SCREAMING_SNAKE_CASE__ ( *__UpperCAmelCase , **__UpperCAmelCase ) -> int:
return MockResponse()
@pytest.mark.parametrize('''urls_type''' , [str, list, dict] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
import requests
monkeypatch.setattr(__UpperCAmelCase , '''request''' , __UpperCAmelCase )
lowercase__: Union[str, Any] = URL
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: Union[str, Any] = url
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: List[str] = [url]
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: Tuple = {'''train''': url}
lowercase__: Union[str, Any] = '''dummy'''
lowercase__: List[Any] = '''downloads'''
lowercase__: Tuple = tmp_path
lowercase__: Any = DownloadConfig(
cache_dir=os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , use_etag=__UpperCAmelCase , )
lowercase__: Any = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase )
lowercase__: str = dl_manager.download(__UpperCAmelCase )
lowercase__: Dict = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: Dict = [downloaded_paths]
lowercase__: Tuple = [urls]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
assert "train" in downloaded_paths.keys()
lowercase__: Optional[Any] = downloaded_paths.values()
lowercase__: Any = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase__: Optional[Any] = Path(__UpperCAmelCase )
lowercase__: int = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase__: Tuple = downloaded_path.read_text()
assert content == CONTENT
lowercase__: Dict = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
lowercase__: Optional[Any] = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''' , [str, list, dict] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
lowercase__: List[Any] = str(__UpperCAmelCase )
if issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: int = filename
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: str = [filename]
elif issubclass(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: Dict = {'''train''': filename}
lowercase__: int = '''dummy'''
lowercase__: Optional[int] = xz_file.parent
lowercase__: Dict = '''extracted'''
lowercase__: Dict = DownloadConfig(
cache_dir=__UpperCAmelCase , use_etag=__UpperCAmelCase , )
lowercase__: int = DownloadManager(dataset_name=__UpperCAmelCase , download_config=__UpperCAmelCase )
lowercase__: Any = dl_manager.extract(__UpperCAmelCase )
lowercase__: Dict = paths
for extracted_paths in [extracted_paths]:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowercase__: Optional[Any] = [extracted_paths]
lowercase__: Union[str, Any] = [paths]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
assert "train" in extracted_paths.keys()
lowercase__: Tuple = extracted_paths.values()
lowercase__: List[Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__UpperCAmelCase , __UpperCAmelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase__: Dict = Path(__UpperCAmelCase )
lowercase__: Any = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__UpperCAmelCase , etag=__UpperCAmelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase__: Optional[Any] = extracted_path.read_text()
lowercase__: Any = text_file.read_text()
assert extracted_file_content == expected_file_content
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
assert path.endswith('''.jsonl''' )
for num_items, line in enumerate(__UpperCAmelCase , start=1 ):
lowercase__: Dict = json.loads(line.decode('''utf-8''' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
lowercase__: List[Any] = request.getfixturevalue(__UpperCAmelCase )
lowercase__: Optional[int] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ):
_test_jsonl(__UpperCAmelCase , __UpperCAmelCase )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
lowercase__: Optional[Any] = request.getfixturevalue(__UpperCAmelCase )
lowercase__: List[Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__UpperCAmelCase ) , start=1 ):
_test_jsonl(__UpperCAmelCase , __UpperCAmelCase )
assert num_tar == 1
assert num_jsonl == 2
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]:
lowercase__: List[Any] = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__UpperCAmelCase ) , start=1 ):
assert os.path.basename(__UpperCAmelCase ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 359 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
__A = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = "rag"
_UpperCAmelCase :List[Any] = True
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=" / " , _UpperCAmelCase=" // " , _UpperCAmelCase=5 , _UpperCAmelCase=300 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase="wiki_dpr" , _UpperCAmelCase="train" , _UpperCAmelCase="compressed" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
bos_token_id=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , forced_eos_token_id=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , prefix=_UpperCAmelCase , vocab_size=_UpperCAmelCase , **_UpperCAmelCase , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
lowercase__: Optional[Any] = kwargs.pop('''question_encoder''' )
lowercase__: Any = question_encoder_config.pop('''model_type''' )
lowercase__: Tuple = kwargs.pop('''generator''' )
lowercase__: Union[str, Any] = decoder_config.pop('''model_type''' )
from ..auto.configuration_auto import AutoConfig
lowercase__: Optional[int] = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Any = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: str = reduce_loss
lowercase__: str = label_smoothing
lowercase__: Dict = exclude_bos_score
lowercase__: Any = do_marginalize
lowercase__: Optional[int] = title_sep
lowercase__: Any = doc_sep
lowercase__: Any = n_docs
lowercase__: List[Any] = max_combined_length
lowercase__: int = dataset
lowercase__: int = dataset_split
lowercase__: str = index_name
lowercase__: Dict = retrieval_vector_size
lowercase__: Dict = retrieval_batch_size
lowercase__: List[str] = passages_path
lowercase__: str = index_path
lowercase__: Optional[Any] = use_dummy_dataset
lowercase__: str = output_retrieved
lowercase__: List[str] = do_deduplication
lowercase__: List[Any] = use_cache
if self.forced_eos_token_id is None:
lowercase__: int = getattr(self.generator , '''forced_eos_token_id''' , _UpperCAmelCase )
@classmethod
def _snake_case ( cls , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ):
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = copy.deepcopy(self.__dict__ )
lowercase__: str = self.question_encoder.to_dict()
lowercase__: str = self.generator.to_dict()
lowercase__: str = self.__class__.model_type
return output
| 2 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int:
lowercase__ = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase__ = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowercase__ = 4
lowercase__ = 4_8
lowercase__ = '''pixelshuffle_aux'''
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase__ = [6, 6, 6, 6]
lowercase__ = 6_0
lowercase__ = [6, 6, 6, 6]
lowercase__ = '''pixelshuffledirect'''
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase__ = 4
lowercase__ = '''nearest+conv'''
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowercase__ = 1
lowercase__ = 1
lowercase__ = 1_2_6
lowercase__ = 7
lowercase__ = 2_5_5.0
lowercase__ = ''''''
return config
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
if "patch_embed.proj" in name and "layers" not in name:
lowercase__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
if "layers" in name:
lowercase__ = name.replace('''layers''' , '''encoder.stages''' )
if "residual_group.blocks" in name:
lowercase__ = name.replace('''residual_group.blocks''' , '''layers''' )
if "attn.proj" in name:
lowercase__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowercase__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowercase__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowercase__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowercase__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowercase__ = name.replace('''mlp.fc2''' , '''output.dense''' )
if "q_bias" in name:
lowercase__ = name.replace('''q_bias''' , '''query.bias''' )
if "k_bias" in name:
lowercase__ = name.replace('''k_bias''' , '''key.bias''' )
if "v_bias" in name:
lowercase__ = name.replace('''v_bias''' , '''value.bias''' )
if "cpb_mlp" in name:
lowercase__ = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
if "patch_embed.proj" in name:
lowercase__ = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
if name == "norm.weight":
lowercase__ = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ = '''layernorm.bias'''
if "conv_first" in name:
lowercase__ = name.replace('''conv_first''' , '''first_convolution''' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowercase__ = name.replace('''conv_last''' , '''final_convolution''' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowercase__ = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
if "upsample.0" in name:
lowercase__ = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
if "upsample.2" in name:
lowercase__ = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
lowercase__ = '''upsample.''' + name
elif config.upsampler == "pixelshuffledirect":
lowercase__ = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
lowercase__ = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
else:
pass
else:
lowercase__ = '''swin2sr.''' + name
return name
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
for key in orig_state_dict.copy().keys():
lowercase__ = orig_state_dict.pop(__UpperCAmelCase )
if "qkv" in key:
lowercase__ = key.split('''.''' )
lowercase__ = int(key_split[1] )
lowercase__ = int(key_split[4] )
lowercase__ = config.embed_dim
if "weight" in key:
lowercase__ = val[:dim, :]
lowercase__ = val[dim : dim * 2, :]
lowercase__ = val[-dim:, :]
else:
lowercase__ = val[:dim]
lowercase__ = val[dim : dim * 2]
lowercase__ = val[-dim:]
pass
else:
lowercase__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
lowercase__ = get_config(__UpperCAmelCase )
lowercase__ = SwinaSRForImageSuperResolution(__UpperCAmelCase )
model.eval()
lowercase__ = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='''cpu''' )
lowercase__ = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
lowercase__ = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
raise ValueError('''Missing keys when converting: {}'''.format(__UpperCAmelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"""Unexpected key {key} in state_dict""" )
# verify values
lowercase__ = '''https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'''
lowercase__ = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert('''RGB''' )
lowercase__ = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowercase__ = 1_2_6 if '''Jpeg''' in checkpoint_url else 2_5_6
lowercase__ = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
lowercase__ = transforms(__UpperCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
lowercase__ = pixel_values[:, 0, :, :].unsqueeze(1 )
lowercase__ = model(__UpperCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowercase__ = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowercase__ = torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase__ = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowercase__ = torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowercase__ = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowercase__ = torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase__ = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowercase__ = torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase__ = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowercase__ = torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __UpperCAmelCase , atol=1e-3 )
print('''Looks ok!''' )
lowercase__ = {
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''': (
'''swin2SR-classical-sr-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth''': (
'''swin2SR-classical-sr-x4-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth''': (
'''swin2SR-compressed-sr-x4-48'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth''': (
'''swin2SR-lightweight-x2-64'''
),
'''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth''': (
'''swin2SR-realworld-sr-x4-64-bsrgan-psnr'''
),
}
lowercase__ = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__UpperCAmelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(__UpperCAmelCase )
if push_to_hub:
model.push_to_hub(F"""caidas/{model_name}""" )
processor.push_to_hub(F"""caidas/{model_name}""" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
__A = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 360 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: Union[str, Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: Dict = f.read()
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(os.path.isfile(_UpperCAmelCase ) )
# File is cached at the same place the second time.
lowercase__: Any = cached_file(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
# Using a specific revision to test the full commit hash.
lowercase__: Dict = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''9b8c223''' )
self.assertEqual(_UpperCAmelCase , os.path.join(_UpperCAmelCase , '''snapshots''' , _UpperCAmelCase , _UpperCAmelCase ) )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
lowercase__: int = cached_file('''tiny-random-bert''' , _UpperCAmelCase )
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
lowercase__: List[Any] = cached_file(_UpperCAmelCase , _UpperCAmelCase , revision='''aaaa''' )
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' )
def _snake_case ( self ):
with self.assertRaisesRegex(_UpperCAmelCase , '''does not appear to have a file named''' ):
lowercase__: Optional[Any] = cached_file(_UpperCAmelCase , '''conf''' )
with open(os.path.join(_UpperCAmelCase , '''refs''' , '''main''' ) ) as f:
lowercase__: int = f.read()
self.assertTrue(os.path.isfile(os.path.join(_UpperCAmelCase , '''.no_exist''' , _UpperCAmelCase , '''conf''' ) ) )
lowercase__: Dict = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: List[str] = cached_file(_UpperCAmelCase , '''conf''' , local_files_only=_UpperCAmelCase , _raise_exceptions_for_missing_entries=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
lowercase__: Union[str, Any] = mock.Mock()
lowercase__: str = 500
lowercase__: Union[str, Any] = {}
lowercase__: List[str] = HTTPError
lowercase__: int = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=_UpperCAmelCase ) as mock_head:
lowercase__: Any = cached_file(_UpperCAmelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCAmelCase )
self.assertIsNone(_UpperCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def _snake_case ( self ):
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCAmelCase ) )
def _snake_case ( self ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , _UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_UpperCAmelCase , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase , revision='''ahaha''' )
lowercase__: Optional[Any] = get_file_from_repo('''bert-base-cased''' , _UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
lowercase__: Optional[Any] = json.loads(open(_UpperCAmelCase , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def _snake_case ( self ):
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__: Any = Path(_UpperCAmelCase ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(_UpperCAmelCase , '''a.txt''' ) , str(_UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(_UpperCAmelCase , '''b.txt''' ) )
| 2 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: List[Any] = parent
lowercase__: List[str] = batch_size
lowercase__: List[str] = seq_length
lowercase__: List[str] = is_training
lowercase__: Optional[int] = use_input_mask
lowercase__: int = use_token_type_ids
lowercase__: List[Any] = use_labels
lowercase__: int = vocab_size
lowercase__: Union[str, Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: List[str] = num_attention_heads
lowercase__: Dict = intermediate_size
lowercase__: Any = hidden_act
lowercase__: Dict = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[Any] = max_position_embeddings
lowercase__: Dict = type_vocab_size
lowercase__: Dict = type_sequence_label_size
lowercase__: Optional[int] = initializer_range
lowercase__: List[Any] = num_labels
lowercase__: Dict = num_choices
lowercase__: List[Any] = scope
def _snake_case ( self ):
lowercase__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Optional[int] = None
if self.use_input_mask:
lowercase__: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: Optional[Any] = None
if self.use_token_type_ids:
lowercase__: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[int] = None
lowercase__: List[str] = None
lowercase__: List[str] = None
if self.use_labels:
lowercase__: Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Any = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = NystromformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Tuple = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: List[str] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = NystromformerForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = NystromformerForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Dict = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = self.num_labels
lowercase__: int = NystromformerForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = self.num_labels
lowercase__: Union[str, Any] = NystromformerForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = self.num_choices
lowercase__: Optional[int] = NystromformerForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
lowercase__: int = self.prepare_config_and_inputs()
(
lowercase__
): List[Any] = config_and_inputs
lowercase__: Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Union[str, Any] = (
{
"feature-extraction": NystromformerModel,
"fill-mask": NystromformerForMaskedLM,
"question-answering": NystromformerForQuestionAnswering,
"text-classification": NystromformerForSequenceClassification,
"token-classification": NystromformerForTokenClassification,
"zero-shot": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Any = False
def _snake_case ( self ):
lowercase__: Optional[Any] = NystromformerModelTester(self )
lowercase__: Any = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__: List[str] = type
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def _snake_case ( self ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: str = NystromformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@require_torch
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: Dict = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' )
lowercase__: Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
lowercase__: Any = model(_UpperCAmelCase )[0]
lowercase__: str = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__: Union[str, Any] = torch.tensor(
[[[-0.4_532, -0.0_936, 0.5_137], [-0.2_676, 0.0_628, 0.6_186], [-0.3_629, -0.1_726, 0.4_716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def _snake_case ( self ):
lowercase__: List[Any] = '''the [MASK] of Belgium is Brussels'''
lowercase__: str = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' )
lowercase__: Optional[int] = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' )
lowercase__: Any = tokenizer(_UpperCAmelCase , return_tensors='''pt''' )
with torch.no_grad():
lowercase__: Optional[int] = model(encoding.input_ids ).logits
lowercase__: str = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(_UpperCAmelCase ) , '''capital''' )
| 361 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[Any] = "beit"
def __init__( self , _UpperCAmelCase=8192 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=[3, 5, 7, 11] , _UpperCAmelCase=[1, 2, 3, 6] , _UpperCAmelCase=True , _UpperCAmelCase=0.4 , _UpperCAmelCase=256 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=255 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: Optional[int] = num_attention_heads
lowercase__: int = intermediate_size
lowercase__: List[str] = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: List[str] = initializer_range
lowercase__: Optional[int] = layer_norm_eps
lowercase__: int = image_size
lowercase__: Tuple = patch_size
lowercase__: int = num_channels
lowercase__: Optional[Any] = use_mask_token
lowercase__: List[Any] = use_absolute_position_embeddings
lowercase__: Optional[int] = use_relative_position_bias
lowercase__: Optional[int] = use_shared_relative_position_bias
lowercase__: Optional[Any] = layer_scale_init_value
lowercase__: Union[str, Any] = drop_path_rate
lowercase__: Tuple = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase__: Tuple = out_indices
lowercase__: Optional[int] = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase__: List[str] = use_auxiliary_head
lowercase__: Optional[Any] = auxiliary_loss_weight
lowercase__: str = auxiliary_channels
lowercase__: List[str] = auxiliary_num_convs
lowercase__: Tuple = auxiliary_concat_input
lowercase__: Dict = semantic_loss_ignore_index
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = version.parse("1.11" )
@property
def _snake_case ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def _snake_case ( self ):
return 1e-4
| 2 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class UpperCAmelCase :
"""simple docstring"""
_UpperCAmelCase :str = field(
metadata={"help": "The output directory where the model will be written."} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The encoder model checkpoint for weights initialization."
"Don't set if you want to train an encoder model from scratch."
)
} ,)
_UpperCAmelCase :str = field(
metadata={
"help": (
"The decoder model checkpoint for weights initialization."
"Don't set if you want to train a decoder model from scratch."
)
} ,)
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
_UpperCAmelCase :Optional[str] = field(
default=_UpperCAmelCase ,metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
lowercase__: Dict = HfArgumentParser((ModelArguments,) )
(lowercase__ ): List[str] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
lowercase__: List[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
lowercase__: int = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
lowercase__: str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
lowercase__: Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
lowercase__: Tuple = True
lowercase__: int = True
lowercase__: Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=__UpperCAmelCase , decoder_config=__UpperCAmelCase , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
lowercase__: int = decoder_config.decoder_start_token_id
lowercase__: Tuple = decoder_config.pad_token_id
if decoder_start_token_id is None:
lowercase__: Tuple = decoder_config.bos_token_id
if pad_token_id is None:
lowercase__: Optional[int] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
lowercase__: Optional[Any] = decoder_config.eos_token_id
lowercase__: Tuple = decoder_start_token_id
lowercase__: Dict = pad_token_id
lowercase__: Optional[int] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
lowercase__: Union[str, Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
lowercase__: Tuple = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
| 362 | """simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: int = ''''''
for word_or_phrase in separated:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise Exception('''join() accepts only strings to be joined''' )
joined += word_or_phrase + separator
return joined.strip(__UpperCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 2 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = "time_series_transformer"
_UpperCAmelCase :Union[str, Any] = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "student_t" , _UpperCAmelCase = "nll" , _UpperCAmelCase = 1 , _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7] , _UpperCAmelCase = "mean" , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 32 , _UpperCAmelCase = 32 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = True , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 64 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 100 , _UpperCAmelCase = 0.02 , _UpperCAmelCase=True , **_UpperCAmelCase , ):
# time series specific configuration
lowercase__: Union[str, Any] = prediction_length
lowercase__: str = context_length or prediction_length
lowercase__: int = distribution_output
lowercase__: Dict = loss
lowercase__: str = input_size
lowercase__: Optional[Any] = num_time_features
lowercase__: str = lags_sequence
lowercase__: List[Any] = scaling
lowercase__: Optional[int] = num_dynamic_real_features
lowercase__: Tuple = num_static_real_features
lowercase__: Tuple = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
lowercase__: Optional[Any] = cardinality
else:
lowercase__: Dict = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
lowercase__: Union[str, Any] = embedding_dimension
else:
lowercase__: Tuple = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__: List[str] = num_parallel_samples
# Transformer architecture configuration
lowercase__: int = input_size * len(_UpperCAmelCase ) + self._number_of_features
lowercase__: Tuple = d_model
lowercase__: Optional[Any] = encoder_attention_heads
lowercase__: Optional[int] = decoder_attention_heads
lowercase__: List[str] = encoder_ffn_dim
lowercase__: Any = decoder_ffn_dim
lowercase__: List[str] = encoder_layers
lowercase__: Optional[Any] = decoder_layers
lowercase__: Tuple = dropout
lowercase__: Dict = attention_dropout
lowercase__: Optional[int] = activation_dropout
lowercase__: Optional[int] = encoder_layerdrop
lowercase__: int = decoder_layerdrop
lowercase__: Union[str, Any] = activation_function
lowercase__: Tuple = init_std
lowercase__: List[Any] = use_cache
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def _snake_case ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 363 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline
_UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__: List[Any] = DDIMScheduler()
torch.manual_seed(0 )
lowercase__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase )
lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: int = torch.manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _UpperCAmelCase=0 ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
lowercase__: int = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
lowercase__: int = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
lowercase__: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__: int = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 2 | 0 |
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> str:
return "".join(sorted(__UpperCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> list[str]:
return word_by_signature[signature(__UpperCAmelCase )]
__A = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
__A = sorted({word.strip().lower() for word in data.splitlines()})
__A = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__A = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
| 364 | """simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__A = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = DebertaVaTokenizer
_UpperCAmelCase :Tuple = DebertaVaTokenizerFast
_UpperCAmelCase :int = True
_UpperCAmelCase :int = True
def _snake_case ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__: List[Any] = DebertaVaTokenizer(_UpperCAmelCase , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: List[str] = '''this is a test'''
lowercase__: int = '''this is a test'''
return input_text, output_text
def _snake_case ( self ):
lowercase__: Optional[int] = '''<pad>'''
lowercase__: Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_UpperCAmelCase ) , 30001 )
def _snake_case ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _snake_case ( self ):
# fmt: off
lowercase__: int = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: List[str] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
lowercase__: Any = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def _snake_case ( self ):
pass
def _snake_case ( self ):
# fmt: off
lowercase__: Dict = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Tuple = DebertaVaTokenizerFast(_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Any = '''I was born in 92000, and this is falsé.'''
lowercase__: str = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: List[str] = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Union[str, Any] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Union[str, Any] = '''I was born in 92000, and this is falsé.'''
lowercase__: int = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# fmt: off
lowercase__: Optional[int] = ''' \tHeLLo!how \n Are yoU? '''
lowercase__: str = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
lowercase__: Dict = DebertaVaTokenizer(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = DebertaVaTokenizerFast(_UpperCAmelCase , do_lower_case=_UpperCAmelCase , split_by_punct=_UpperCAmelCase )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.get_tokenizer()
lowercase__: List[Any] = self.get_rust_tokenizer()
lowercase__: List[str] = '''I was born in 92000, and this is falsé.'''
lowercase__: Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
lowercase__: List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__: Tuple = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = self.get_rust_tokenizer()
lowercase__: str = tokenizer.encode(_UpperCAmelCase )
lowercase__: Any = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''This is a test'''
lowercase__: str = [13, 1, 4398, 25, 21, 1289]
lowercase__: List[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: Any = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
lowercase__: int = DebertaVaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: int = DebertaVaTokenizerFast(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__: Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Any = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Union[str, Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: str = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# fmt: off
lowercase__: str = '''I was born in 92000, and this is falsé.'''
lowercase__: Dict = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
lowercase__: Tuple = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
lowercase__: Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
lowercase__: Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: List[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Dict = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[int] = DebertaVaTokenizer(_UpperCAmelCase )
lowercase__: Optional[int] = tokenizer.encode('''sequence builders''' )
lowercase__: Optional[Any] = tokenizer.encode('''multi-sequence build''' )
lowercase__: Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
lowercase__: Dict = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _UpperCAmelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _UpperCAmelCase , )
@slow
def _snake_case ( self ):
# fmt: off
lowercase__: List[Any] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 2 | 0 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
try:
lowercase__: List[Any] = int(__UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
lowercase__: Optional[Any] = 2
lowercase__: Union[str, Any] = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase__: Optional[int] = i
while n % i == 0:
lowercase__: Union[str, Any] = n // i
i += 1
return int(__UpperCAmelCase )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 365 | """simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__: Union[str, Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase__: str = self.processor.tokenajson(_UpperCAmelCase )
self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
| 2 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
"processing_mgp_str": ["MgpstrProcessor"],
"tokenization_mgp_str": ["MgpstrTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
"MgpstrModel",
"MgpstrPreTrainedModel",
"MgpstrForSceneTextRecognition",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 366 | """simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__A = logging.get_logger(__name__)
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
warnings.warn(
'''The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use VideoMAEImageProcessor instead.''' , _UpperCAmelCase , )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
| 2 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ):
lowercase__: int = parent
lowercase__: List[Any] = batch_size
lowercase__: List[Any] = seq_length
lowercase__: Optional[int] = is_training
lowercase__: List[Any] = use_attention_mask
lowercase__: List[Any] = use_token_type_ids
lowercase__: Optional[Any] = use_labels
lowercase__: Any = vocab_size
lowercase__: Optional[int] = hidden_size
lowercase__: int = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: Optional[Any] = intermediate_size
lowercase__: int = hidden_act
lowercase__: int = hidden_dropout_prob
lowercase__: Optional[Any] = attention_probs_dropout_prob
lowercase__: Optional[Any] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: List[Any] = type_sequence_label_size
lowercase__: Any = initializer_range
lowercase__: Optional[int] = num_choices
def _snake_case ( self ):
lowercase__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: Optional[int] = None
if self.use_attention_mask:
lowercase__: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: Dict = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCAmelCase , )
return config, input_ids, attention_mask
def _snake_case ( self ):
lowercase__: Dict = self.prepare_config_and_inputs()
lowercase__: Union[str, Any] = config_and_inputs
lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self ):
lowercase__: Optional[Any] = FlaxDistilBertModelTester(self )
@slow
def _snake_case ( self ):
for model_class_name in self.all_model_classes:
lowercase__: List[str] = model_class_name.from_pretrained('''distilbert-base-uncased''' )
lowercase__: Dict = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCAmelCase )
@require_flax
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: List[Any] = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase__: Optional[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowercase__: Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
lowercase__: int = (1, 11, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__: List[str] = np.array([[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
| 367 | """simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
__A = 2_5_6
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = ["melgan"]
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
super().__init__()
# From MELGAN
lowercase__: Union[str, Any] = math.log(1e-5 ) # Matches MelGAN training.
lowercase__: Union[str, Any] = 4.0 # Largest value for most examples
lowercase__: Union[str, Any] = 128
self.register_modules(
notes_encoder=_UpperCAmelCase , continuous_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase , scheduler=_UpperCAmelCase , melgan=_UpperCAmelCase , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ):
lowercase__, lowercase__: int = output_range
if clip:
lowercase__: Any = torch.clip(_UpperCAmelCase , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase__: Optional[int] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=(-1.0, 1.0) , _UpperCAmelCase=False ):
lowercase__, lowercase__: str = input_range
lowercase__: Dict = torch.clip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip else outputs
# Scale to [0, 1].
lowercase__: Tuple = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = input_tokens > 0
lowercase__, lowercase__: str = self.notes_encoder(
encoder_input_tokens=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase )
lowercase__, lowercase__: Optional[int] = self.continuous_encoder(
encoder_inputs=_UpperCAmelCase , encoder_inputs_mask=_UpperCAmelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = noise_time
if not torch.is_tensor(_UpperCAmelCase ):
lowercase__: Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowercase__: str = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__: Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase__: Union[str, Any] = self.decoder(
encodings_and_masks=_UpperCAmelCase , decoder_input_tokens=_UpperCAmelCase , decoder_noise_time=_UpperCAmelCase )
return logits
@torch.no_grad()
def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = 100 , _UpperCAmelCase = True , _UpperCAmelCase = "numpy" , _UpperCAmelCase = None , _UpperCAmelCase = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(_UpperCAmelCase )}.""" )
lowercase__: List[str] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase__: Any = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase__: Tuple = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
for i, encoder_input_tokens in enumerate(_UpperCAmelCase ):
if i == 0:
lowercase__: str = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase__: Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_UpperCAmelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase__: Union[str, Any] = ones
lowercase__: str = self.scale_features(
_UpperCAmelCase , output_range=[-1.0, 1.0] , clip=_UpperCAmelCase )
lowercase__: Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_UpperCAmelCase , continuous_mask=_UpperCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase__: int = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_UpperCAmelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase__: List[Any] = self.decode(
encodings_and_masks=_UpperCAmelCase , input_tokens=_UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase__: Union[str, Any] = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
lowercase__: int = self.scale_to_features(_UpperCAmelCase , input_range=[-1.0, 1.0] )
lowercase__: Dict = mel[:1]
lowercase__: List[Any] = mel.cpu().float().numpy()
lowercase__: Optional[int] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_UpperCAmelCase , _UpperCAmelCase )
logger.info('''Generated segment''' , _UpperCAmelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
lowercase__: Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase__: Dict = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_UpperCAmelCase )
| 2 | 0 |
from ...processing_utils import ProcessorMixin
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = "SpeechT5FeatureExtractor"
_UpperCAmelCase :Optional[int] = "SpeechT5Tokenizer"
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
lowercase__: Optional[Any] = kwargs.pop('''audio''' , _UpperCAmelCase )
lowercase__: Tuple = kwargs.pop('''text''' , _UpperCAmelCase )
lowercase__: Dict = kwargs.pop('''text_target''' , _UpperCAmelCase )
lowercase__: Dict = kwargs.pop('''audio_target''' , _UpperCAmelCase )
lowercase__: Union[str, Any] = kwargs.pop('''sampling_rate''' , _UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
lowercase__: List[Any] = self.feature_extractor(_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
elif text is not None:
lowercase__: Optional[int] = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
else:
lowercase__: Optional[Any] = None
if audio_target is not None:
lowercase__: Union[str, Any] = self.feature_extractor(audio_target=_UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Tuple = targets['''input_values''']
elif text_target is not None:
lowercase__: Dict = self.tokenizer(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Union[str, Any] = targets['''input_ids''']
else:
lowercase__: Tuple = None
if inputs is None:
return targets
if targets is not None:
lowercase__: Union[str, Any] = labels
lowercase__: Tuple = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowercase__: Any = decoder_attention_mask
return inputs
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
lowercase__: Any = kwargs.pop('''input_values''' , _UpperCAmelCase )
lowercase__: List[Any] = kwargs.pop('''input_ids''' , _UpperCAmelCase )
lowercase__: Any = kwargs.pop('''labels''' , _UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
lowercase__: List[str] = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
elif input_ids is not None:
lowercase__: str = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase )
else:
lowercase__: Tuple = None
if labels is not None:
if "input_ids" in labels or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and "input_ids" in labels[0]):
lowercase__: Tuple = self.tokenizer.pad(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: Union[str, Any] = targets['''input_ids''']
else:
lowercase__: List[Any] = self.feature_extractor.feature_size
lowercase__: List[Any] = self.feature_extractor.num_mel_bins
lowercase__: Union[str, Any] = self.feature_extractor.pad(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
lowercase__: List[Any] = feature_size_hack
lowercase__: str = targets['''input_values''']
else:
lowercase__: Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
lowercase__: Dict = labels
lowercase__: Optional[int] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
lowercase__: Any = decoder_attention_mask
return inputs
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def _snake_case ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
| 368 | """simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
__A = logging.get_logger(__name__)
__A = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :str = "bloom"
_UpperCAmelCase :List[str] = ["past_key_values"]
_UpperCAmelCase :Optional[Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , _UpperCAmelCase=250880 , _UpperCAmelCase=64 , _UpperCAmelCase=2 , _UpperCAmelCase=8 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=False , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: Any = vocab_size
# Backward compatibility with n_embed kwarg
lowercase__: Optional[Any] = kwargs.pop('''n_embed''' , _UpperCAmelCase )
lowercase__: int = hidden_size if n_embed is None else n_embed
lowercase__: int = n_layer
lowercase__: int = n_head
lowercase__: Optional[Any] = layer_norm_epsilon
lowercase__: int = initializer_range
lowercase__: List[Any] = use_cache
lowercase__: str = pretraining_tp
lowercase__: Tuple = apply_residual_connection_post_layernorm
lowercase__: int = hidden_dropout
lowercase__: Optional[Any] = attention_dropout
lowercase__: int = bos_token_id
lowercase__: Union[str, Any] = eos_token_id
lowercase__: Any = slow_but_exact
super().__init__(bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :int = version.parse("1.12" )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
lowercase__: str = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' , inverted_values_shape=_UpperCAmelCase )
lowercase__: List[str] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__: str = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
@property
def _snake_case ( self ):
return 1e-3
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
lowercase__: str = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__, lowercase__: Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase__: Tuple = seqlen + 2
lowercase__: str = self._config.hidden_size // self.num_attention_heads
lowercase__: Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
lowercase__: Union[str, Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
lowercase__: str = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__: Tuple = common_inputs['''attention_mask''']
if self.use_past:
lowercase__: int = ordered_inputs['''attention_mask'''].dtype
lowercase__: List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
| 2 | 0 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
__A = "docs/source/en/_toctree.yml"
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
lowercase__: int = defaultdict(__UpperCAmelCase )
for doc in model_doc:
counts[doc["local"]] += 1
lowercase__: List[str] = [key for key, value in counts.items() if value > 1]
lowercase__: Optional[Any] = []
for duplicate_key in duplicates:
lowercase__: Optional[int] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__UpperCAmelCase ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : s["title"].lower() )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase=False ) -> str:
with open(__UpperCAmelCase , encoding='''utf-8''' ) as f:
lowercase__: Dict = yaml.safe_load(f.read() )
# Get to the API doc
lowercase__: int = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowercase__: int = content[api_idx]['''sections''']
# Then to the model doc
lowercase__: List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowercase__: Union[str, Any] = api_doc[model_idx]['''sections''']
lowercase__: int = [(idx, section) for idx, section in enumerate(__UpperCAmelCase ) if '''sections''' in section]
lowercase__: List[Any] = False
for idx, modality_doc in modalities_docs:
lowercase__: List[Any] = modality_doc['''sections''']
lowercase__: List[Any] = clean_model_doc_toc(__UpperCAmelCase )
if old_modality_doc != new_modality_doc:
lowercase__: Dict = True
if overwrite:
lowercase__: int = new_modality_doc
if diff:
if overwrite:
lowercase__: Optional[Any] = model_doc
lowercase__: List[str] = api_doc
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__UpperCAmelCase , allow_unicode=__UpperCAmelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__A = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 369 | """simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: Optional[int] = batch_size
lowercase__: List[str] = seq_length
lowercase__: Optional[int] = is_training
lowercase__: Dict = use_input_mask
lowercase__: List[Any] = use_token_type_ids
lowercase__: List[str] = use_labels
lowercase__: Union[str, Any] = vocab_size
lowercase__: str = hidden_size
lowercase__: Any = embedding_size
lowercase__: Any = num_hidden_layers
lowercase__: Any = num_attention_heads
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: List[Any] = hidden_dropout_prob
lowercase__: Dict = attention_probs_dropout_prob
lowercase__: Optional[int] = max_position_embeddings
lowercase__: List[Any] = type_vocab_size
lowercase__: Tuple = type_sequence_label_size
lowercase__: Optional[int] = initializer_range
lowercase__: Dict = num_labels
lowercase__: int = num_choices
lowercase__: int = scope
def _snake_case ( self ):
lowercase__: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: List[Any] = None
if self.use_input_mask:
lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__: List[Any] = None
if self.use_token_type_ids:
lowercase__: Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Optional[Any] = None
lowercase__: Any = None
lowercase__: str = None
if self.use_labels:
lowercase__: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = MobileBertModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: Dict = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase )
lowercase__: str = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForMaskedLM(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[Any] = MobileBertForNextSentencePrediction(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForPreTraining(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: str = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Any = MobileBertForQuestionAnswering(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: int = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Any = MobileBertForSequenceClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Tuple = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: str = self.num_labels
lowercase__: Union[str, Any] = MobileBertForTokenClassification(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Dict = self.num_choices
lowercase__: Union[str, Any] = MobileBertForMultipleChoice(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
lowercase__: List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__: Optional[Any] = model(
_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
lowercase__: Optional[int] = self.prepare_config_and_inputs()
(
(
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
), (
lowercase__
),
): Union[str, Any] = config_and_inputs
lowercase__: Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :Optional[Any] = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[Any] = True
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
lowercase__: int = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
lowercase__: Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase )
lowercase__: Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
return inputs_dict
def _snake_case ( self ):
lowercase__: int = MobileBertModelTester(self )
lowercase__: Dict = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> List[Any]:
return torch.tensor(
__UpperCAmelCase , dtype=torch.long , device=__UpperCAmelCase , )
__A = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ):
lowercase__: Tuple = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(_UpperCAmelCase )
lowercase__: Tuple = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
lowercase__: Tuple = model(_UpperCAmelCase )[0]
lowercase__: Dict = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , _UpperCAmelCase )
lowercase__: List[Any] = torch.tensor(
[
[
[-2.4_7_3_6_5_2_6e0_7, 8.2_6_9_1_6_5_6e0_4, 1.6_5_2_1_8_3_8e0_5],
[-5.7_5_4_1_7_0_4e-0_1, 3.9_0_5_6_0_2_2e0_0, 4.4_0_1_1_5_0_7e0_0],
[2.6_0_4_7_3_5_9e0_0, 1.5_6_7_7_6_5_2e0_0, -1.7_3_2_4_1_8_8e-0_1],
]
] , device=_UpperCAmelCase , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
lowercase__: int = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
lowercase__: Optional[int] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
| 2 | 0 |
"""simple docstring"""
import unittest
from transformers import DonutProcessor
__A = "naver-clova-ix/donut-base"
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
lowercase__: int = DonutProcessor.from_pretrained(_UpperCAmelCase )
def _snake_case ( self ):
lowercase__: Tuple = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
lowercase__: Union[str, Any] = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
lowercase__: str = self.processor.tokenajson(_UpperCAmelCase )
self.assertDictEqual(_UpperCAmelCase , _UpperCAmelCase )
| 370 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Any = "unispeech-sat"
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
lowercase__: Union[str, Any] = hidden_size
lowercase__: Union[str, Any] = feat_extract_norm
lowercase__: Any = feat_extract_activation
lowercase__: List[Any] = list(_UpperCAmelCase )
lowercase__: Optional[int] = list(_UpperCAmelCase )
lowercase__: int = list(_UpperCAmelCase )
lowercase__: Any = conv_bias
lowercase__: List[str] = num_conv_pos_embeddings
lowercase__: List[str] = num_conv_pos_embedding_groups
lowercase__: int = len(self.conv_dim )
lowercase__: Dict = num_hidden_layers
lowercase__: List[Any] = intermediate_size
lowercase__: Dict = hidden_act
lowercase__: Optional[Any] = num_attention_heads
lowercase__: Union[str, Any] = hidden_dropout
lowercase__: List[Any] = attention_dropout
lowercase__: str = activation_dropout
lowercase__: Optional[Any] = feat_proj_dropout
lowercase__: Optional[int] = final_dropout
lowercase__: Any = layerdrop
lowercase__: int = layer_norm_eps
lowercase__: Any = initializer_range
lowercase__: Union[str, Any] = vocab_size
lowercase__: Optional[Any] = num_clusters
lowercase__: Dict = do_stable_layer_norm
lowercase__: List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__: Dict = apply_spec_augment
lowercase__: Union[str, Any] = mask_time_prob
lowercase__: List[str] = mask_time_length
lowercase__: Union[str, Any] = mask_time_min_masks
lowercase__: str = mask_feature_prob
lowercase__: Dict = mask_feature_length
lowercase__: List[Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowercase__: Tuple = num_codevectors_per_group
lowercase__: Optional[Any] = num_codevector_groups
lowercase__: int = contrastive_logits_temperature
lowercase__: Any = feat_quantizer_dropout
lowercase__: int = num_negatives
lowercase__: Optional[Any] = codevector_dim
lowercase__: int = proj_codevector_dim
lowercase__: str = diversity_loss_weight
# ctc loss
lowercase__: int = ctc_loss_reduction
lowercase__: Union[str, Any] = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__: Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = list(_UpperCAmelCase )
lowercase__: Union[str, Any] = list(_UpperCAmelCase )
lowercase__: Tuple = xvector_output_dim
@property
def _snake_case ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 2 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
lowercase__: Optional[int] = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=__UpperCAmelCase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=__UpperCAmelCase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=__UpperCAmelCase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=__UpperCAmelCase , default=1_0_0_0 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=__UpperCAmelCase , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=__UpperCAmelCase , default=5_1_2 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=__UpperCAmelCase , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
lowercase__: List[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Any:
def fn(__UpperCAmelCase ):
return tokenizer(examples['''text'''] )
return fn
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Dict:
lowercase__: Dict = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
lowercase__: int = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
lowercase__: List[str] = tf.train.Features(feature=__UpperCAmelCase )
lowercase__: str = tf.train.Example(features=__UpperCAmelCase )
lowercase__: List[str] = example.SerializeToString()
records.append(__UpperCAmelCase )
return records
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[int]:
lowercase__: Any = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__: Union[str, Any] = min(len(__UpperCAmelCase ) , args.limit )
lowercase__: Optional[int] = dataset.select(range(__UpperCAmelCase ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__: Tuple = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__: Union[str, Any] = os.path.join(args.output_dir , args.split )
if not os.path.exists(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
else:
lowercase__: List[str] = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__: str = tokenize_function(__UpperCAmelCase )
lowercase__: Tuple = dataset.map(__UpperCAmelCase , batched=__UpperCAmelCase , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__UpperCAmelCase ):
# Concatenate all texts.
lowercase__: Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__: List[str] = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__: Optional[int] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__: Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , __UpperCAmelCase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowercase__: Union[str, Any] = dataset_tokenized.map(__UpperCAmelCase , batched=__UpperCAmelCase , batch_size=1_0_0_0 , num_proc=4 )
lowercase__: int = 0
lowercase__: List[Any] = 0
for shard in range(0 , len(__UpperCAmelCase ) , args.shard_size ):
lowercase__: Any = grouped_dataset[shard : shard + args.shard_size]
lowercase__: Tuple = len(dataset_snapshot['''input_ids'''] )
lowercase__: Any = os.path.join(__UpperCAmelCase , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__: int = get_serialized_examples(__UpperCAmelCase )
with tf.io.TFRecordWriter(__UpperCAmelCase ) as out_file:
for i in range(len(__UpperCAmelCase ) ):
lowercase__: Tuple = serialized_examples[i]
out_file.write(__UpperCAmelCase )
print('''Wrote file {} containing {} records'''.format(__UpperCAmelCase , __UpperCAmelCase ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , '''w''' ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=__UpperCAmelCase )
if __name__ == "__main__":
__A = parse_args()
main(args)
| 371 | """simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
__A = parser.parse_args()
__A = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 2 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def __lowerCamelCase ( __lowerCAmelCase : Callable[[int | float], int | float] , __lowerCAmelCase : int | float , __lowerCAmelCase : int | float , __lowerCAmelCase : int = 1_00 , ) -> float:
snake_case = x_start
snake_case = fnc(__lowerCAmelCase )
snake_case = 0.0
for _ in range(__lowerCAmelCase ):
# Approximates curve as a sequence of linear lines and sums their length
snake_case = (x_end - x_start) / steps + xa
snake_case = fnc(__lowerCAmelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
snake_case = xa
snake_case = fxa
return length
if __name__ == "__main__":
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
_SCREAMING_SNAKE_CASE = 10
while i <= 100000:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 3 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "Salesforce/blip-image-captioning-base"
snake_case_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case_ = "image_captioner"
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ["image"]
snake_case_ = ["text"]
def __init__( self : Tuple , *__snake_case : Optional[int] , **__snake_case : Any )-> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : str , __snake_case : "Image" )-> int:
return self.pre_processor(images=__snake_case , return_tensors="""pt""" )
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> Union[str, Any]:
return self.model.generate(**__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any )-> Dict:
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0].strip()
| 3 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "linear"
snake_case_ = "cosine"
snake_case_ = "cosine_with_restarts"
snake_case_ = "polynomial"
snake_case_ = "constant"
snake_case_ = "constant_with_warmup"
snake_case_ = "piecewise_constant"
def __lowerCamelCase ( __lowerCAmelCase : Optimizer , __lowerCAmelCase : int = -1 ) -> List[Any]:
return LambdaLR(__lowerCAmelCase , lambda __lowerCAmelCase : 1 , last_epoch=__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optimizer , __lowerCAmelCase : int , __lowerCAmelCase : int = -1 ) -> Any:
def lr_lambda(__lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1.0 , __lowerCAmelCase ) )
return 1.0
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , last_epoch=__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optimizer , __lowerCAmelCase : str , __lowerCAmelCase : int = -1 ) -> Dict:
snake_case = {}
snake_case = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
snake_case , snake_case = rule_str.split(""":""" )
snake_case = int(__lowerCAmelCase )
snake_case = float(__lowerCAmelCase )
snake_case = value
snake_case = float(rule_list[-1] )
def create_rules_function(__lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ):
def rule_func(__lowerCAmelCase : int ) -> float:
snake_case = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__lowerCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
snake_case = create_rules_function(__lowerCAmelCase , __lowerCAmelCase )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , last_epoch=__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any=-1 ) -> List[str]:
def lr_lambda(__lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optimizer , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : int = -1 ) -> List[Any]:
def lr_lambda(__lowerCAmelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
snake_case = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__lowerCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optimizer , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int = 1 , __lowerCAmelCase : int = -1 ) -> Dict:
def lr_lambda(__lowerCAmelCase : Optional[Any] ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
snake_case = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__lowerCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int=1e-7 , __lowerCAmelCase : int=1.0 , __lowerCAmelCase : Tuple=-1 ) -> List[Any]:
snake_case = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(__lowerCAmelCase : int ):
if current_step < num_warmup_steps:
return float(__lowerCAmelCase ) / float(max(1 , __lowerCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
snake_case = lr_init - lr_end
snake_case = num_training_steps - num_warmup_steps
snake_case = 1 - (current_step - num_warmup_steps) / decay_steps
snake_case = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_SCREAMING_SNAKE_CASE = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __lowerCAmelCase : Union[str, SchedulerType] , __lowerCAmelCase : Optimizer , __lowerCAmelCase : Optional[str] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 1 , __lowerCAmelCase : float = 1.0 , __lowerCAmelCase : int = -1 , ) -> Tuple:
snake_case = SchedulerType(__lowerCAmelCase )
snake_case = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__lowerCAmelCase , last_epoch=__lowerCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__lowerCAmelCase , step_rules=__lowerCAmelCase , last_epoch=__lowerCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , last_epoch=__lowerCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , num_cycles=__lowerCAmelCase , last_epoch=__lowerCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , power=__lowerCAmelCase , last_epoch=__lowerCAmelCase , )
return schedule_func(
__lowerCAmelCase , num_warmup_steps=__lowerCAmelCase , num_training_steps=__lowerCAmelCase , last_epoch=__lowerCAmelCase )
| 3 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
while b:
snake_case , snake_case = b, a % b
return a
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
return a if b == 0 else euclidean_gcd_recursive(__lowerCAmelCase , a % b )
def __lowerCamelCase ( ) -> List[Any]:
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : dict ) -> str:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , """html.parser""" )
snake_case = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
snake_case = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "gptj"
snake_case_ = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[Any] , __snake_case : Tuple=5_04_00 , __snake_case : List[str]=20_48 , __snake_case : Optional[int]=40_96 , __snake_case : Union[str, Any]=28 , __snake_case : Optional[Any]=16 , __snake_case : Optional[Any]=64 , __snake_case : Any=None , __snake_case : List[Any]="gelu_new" , __snake_case : Any=0.0 , __snake_case : Union[str, Any]=0.0 , __snake_case : str=0.0 , __snake_case : Any=1e-5 , __snake_case : List[Any]=0.02 , __snake_case : str=True , __snake_case : Union[str, Any]=5_02_56 , __snake_case : Dict=5_02_56 , __snake_case : List[Any]=False , **__snake_case : Tuple , )-> Any:
snake_case = vocab_size
snake_case = n_positions
snake_case = n_embd
snake_case = n_layer
snake_case = n_head
snake_case = n_inner
snake_case = rotary_dim
snake_case = activation_function
snake_case = resid_pdrop
snake_case = embd_pdrop
snake_case = attn_pdrop
snake_case = layer_norm_epsilon
snake_case = initializer_range
snake_case = use_cache
snake_case = bos_token_id
snake_case = eos_token_id
super().__init__(
bos_token_id=__snake_case , eos_token_id=__snake_case , tie_word_embeddings=__snake_case , **__snake_case )
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : List[Any] , __snake_case : PretrainedConfig , __snake_case : str = "default" , __snake_case : List[PatchingSpec] = None , __snake_case : bool = False , )-> Optional[Any]:
super().__init__(__snake_case , task=__snake_case , patching_specs=__snake_case , use_past=__snake_case )
if not getattr(self._config , """pad_token_id""" , __snake_case ):
# TODO: how to do that better?
snake_case = 0
@property
def lowerCAmelCase ( self : str )-> Mapping[str, Mapping[int, str]]:
snake_case = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
snake_case = {0: """batch""", 1: """past_sequence + sequence"""}
else:
snake_case = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCAmelCase ( self : int )-> int:
return self._config.n_layer
@property
def lowerCAmelCase ( self : Optional[Any] )-> int:
return self._config.n_head
def lowerCAmelCase ( self : Any , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , )-> Mapping[str, Any]:
snake_case = super(__snake_case , self ).generate_dummy_inputs(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
# We need to order the input in the way they appears in the forward()
snake_case = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case = seqlen + 2
snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case = [
(torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(self.num_layers )
]
snake_case = common_inputs["""attention_mask"""]
if self.use_past:
snake_case = ordered_inputs["""attention_mask"""].dtype
snake_case = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self : Dict )-> int:
return 13
| 3 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "WhisperFeatureExtractor"
snake_case_ = "WhisperTokenizer"
def __init__( self : Dict , __snake_case : Any , __snake_case : int )-> List[Any]:
super().__init__(__snake_case , __snake_case )
snake_case = self.feature_extractor
snake_case = False
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str=None , __snake_case : List[str]=None , __snake_case : int=True )-> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self : str , *__snake_case : Tuple , **__snake_case : Union[str, Any] )-> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
snake_case = kwargs.pop("""audio""" , __snake_case )
snake_case = kwargs.pop("""sampling_rate""" , __snake_case )
snake_case = kwargs.pop("""text""" , __snake_case )
if len(__snake_case ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
snake_case = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
snake_case = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings["""input_ids"""]
return inputs
def lowerCAmelCase ( self : Union[str, Any] , *__snake_case : Union[str, Any] , **__snake_case : str )-> Optional[Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : Any , **__snake_case : Union[str, Any] )-> List[str]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : Dict="np" )-> Any:
return self.tokenizer.get_prompt_ids(__snake_case , return_tensors=__snake_case )
| 3 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : Dict ) -> Tuple:
snake_case = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
snake_case = DatasetInfosDict.from_directory(__lowerCAmelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : DatasetInfo ) -> Optional[Any]:
snake_case = str(__lowerCAmelCase )
dataset_info.write_to_directory(__lowerCAmelCase )
snake_case = DatasetInfo.from_directory(__lowerCAmelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__lowerCAmelCase , """dataset_info.json""" ) )
def __lowerCamelCase ( ) -> int:
snake_case = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
snake_case = dataset_info._to_yaml_dict()
assert sorted(__lowerCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
snake_case = yaml.safe_dump(__lowerCAmelCase )
snake_case = yaml.safe_load(__lowerCAmelCase )
assert dataset_info_yaml_dict == reloaded
def __lowerCamelCase ( ) -> Dict:
snake_case = DatasetInfo()
snake_case = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : DatasetInfosDict ) -> Optional[Any]:
snake_case = str(__lowerCAmelCase )
dataset_infos_dict.write_to_directory(__lowerCAmelCase )
snake_case = DatasetInfosDict.from_directory(__lowerCAmelCase )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
snake_case = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
snake_case = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__lowerCAmelCase , """README.md""" ) )
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 1
for i in range(0 , len(__lowerCAmelCase ) ):
total *= numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 0
for i in range(0 , len(__lowerCAmelCase ) ):
total += numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> Optional[Any]:
snake_case = r"""\w+[.]\d+"""
snake_case = re.findall(__lowerCAmelCase , __lowerCAmelCase )
for pat in pats:
snake_case = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) )
return key
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> List[str]:
snake_case = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
snake_case = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
snake_case = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
snake_case = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
snake_case = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
snake_case = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
snake_case = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) )
snake_case = flatten_dict(__lowerCAmelCase )
snake_case = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case = rename_key(__lowerCAmelCase )
snake_case = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
snake_case , snake_case = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
snake_case = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
| 3 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] ) -> Dict:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __lowerCamelCase ( __lowerCAmelCase : Any ) -> Optional[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def __lowerCamelCase ( ) -> Any:
snake_case = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ) -> Optional[int]:
snake_case = """imagenet-1k-id2label.json"""
snake_case = 10_00
snake_case = """huggingface/label-files"""
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 20]
snake_case = [3, 12, 16]
snake_case = [1_92, 7_68, 10_24]
snake_case = CvtForImageClassification(__lowerCAmelCase )
snake_case = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case = image_size
snake_case = torch.load(__lowerCAmelCase , map_location=torch.device("""cpu""" ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(__lowerCAmelCase )
snake_case = list_of_state_dict + embeddings(__lowerCAmelCase )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(__lowerCAmelCase , __lowerCAmelCase )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 | 1 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_SCREAMING_SNAKE_CASE = {
"openbmb/cpm-ant-10b": 1024,
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> str:
snake_case = collections.OrderedDict()
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as reader:
snake_case = reader.readlines()
for index, token in enumerate(__lowerCAmelCase ):
snake_case = token.rstrip("""\n""" )
snake_case = index
return vocab
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]=2_00 )-> List[str]:
snake_case = vocab
snake_case = unk_token
snake_case = max_input_chars_per_word
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> List[Any]:
snake_case = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case = 0
snake_case = []
while start < len(__snake_case ):
snake_case = len(__snake_case )
snake_case = None
while start < end:
snake_case = """""".join(chars[start:end] )
if substr in self.vocab:
snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
snake_case = end
return sub_tokens
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = False
def __init__( self : int , __snake_case : Tuple , __snake_case : Optional[int]="<d>" , __snake_case : int="</d>" , __snake_case : List[Any]="<s>" , __snake_case : List[str]="</s>" , __snake_case : str="<pad>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : str="</n>" , __snake_case : List[str]="</_>" , __snake_case : Union[str, Any]="left" , **__snake_case : Tuple , )-> Union[str, Any]:
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
snake_case = bod_token
snake_case = eod_token
snake_case = load_vocab(__snake_case )
snake_case = self.encoder[space_token]
snake_case = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase ( self : Optional[int] )-> List[Any]:
return self.encoder[self.bod_token]
@property
def lowerCAmelCase ( self : str )-> Tuple:
return self.encoder[self.eod_token]
@property
def lowerCAmelCase ( self : str )-> List[str]:
return self.encoder["\n"]
@property
def lowerCAmelCase ( self : List[Any] )-> int:
return len(self.encoder )
def lowerCAmelCase ( self : Any )-> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __snake_case : Any )-> Union[str, Any]:
snake_case = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowerCAmelCase ( self : str , __snake_case : Tuple , **__snake_case : Dict )-> Optional[int]:
snake_case = [i for i in token_ids if i >= 0]
snake_case = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Dict )-> Optional[int]:
return token in self.encoder
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] )-> str:
return "".join(__snake_case )
def lowerCAmelCase ( self : Tuple , __snake_case : int )-> Optional[int]:
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : str , __snake_case : List[Any] )-> str:
return self.decoder.get(__snake_case , self.unk_token )
def lowerCAmelCase ( self : int , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
if os.path.isdir(__snake_case ):
snake_case = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
snake_case = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
snake_case = 0
if " " in self.encoder:
snake_case = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
snake_case = self.encoder["""\n"""]
del self.encoder["\n"]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
snake_case = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : List[int] = None )-> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
| 3 | 1 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Any ) -> int:
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
snake_case = flax_key_tuple[:-1] + ("""weight""",)
snake_case = torch.permute(__lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ):
# linear layer
snake_case = flax_key_tuple[:-1] + ("""weight""",)
snake_case = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case = flax_key_tuple[:-1] + ("""weight""",)
return flax_key_tuple, flax_tensor
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ) -> Any:
if "metadata" in layer:
snake_case = layer.split("""metadata""" )
snake_case = """""".join(split_layer[0] )[:-1]
snake_case = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )]
elif "kvstore" in layer:
snake_case = layer.split("""kvstore""" )
snake_case = """""".join(split_layer[0] )[:-1]
snake_case = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )]
else:
snake_case = layer.split("""/""" )
snake_case = """/""".join(split_layer[:-1] )
snake_case = (split_layer[-1],)
if "kvstore/path" in layer:
snake_case = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
snake_case = """file"""
else:
snake_case = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] ) -> Tuple:
snake_case = rename_keys(__lowerCAmelCase )
snake_case = {}
for k, v in current_block.items():
snake_case = v
snake_case = new_current_block
torch.save(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : str = WEIGHTS_NAME ) -> str:
snake_case = convert_file_size_to_int(__lowerCAmelCase )
snake_case = []
snake_case = {}
snake_case = 0
snake_case = 0
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp:
snake_case = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""]
snake_case = flatten_dict(__lowerCAmelCase , sep="""/""" )
snake_case = {}
for layer in checkpoint_info.keys():
snake_case , snake_case , snake_case = get_key_and_tensorstore_dict(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if curr_real_layer_name in all_layers:
snake_case = content
else:
snake_case = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
snake_case = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
snake_case = torch.tensor(__lowerCAmelCase )
snake_case = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
snake_case , snake_case = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __lowerCAmelCase )
snake_case = """/""".join(__lowerCAmelCase )
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
snake_case = os.path.join(
__lowerCAmelCase , weights_name.replace(""".bin""" , F'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
del current_block
snake_case = {}
snake_case = 0
snake_case = raw_weights.to(getattr(__lowerCAmelCase , __lowerCAmelCase ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
snake_case = os.path.join(__lowerCAmelCase , weights_name.replace(""".bin""" , F'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) )
rename_and_save_block(__lowerCAmelCase , __lowerCAmelCase )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(__lowerCAmelCase ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
snake_case = {}
snake_case = {}
for idx, shard in enumerate(__lowerCAmelCase ):
snake_case = weights_name.replace(
""".bin""" , F'''-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin''' ) # len(sharded_state_dicts):05d}
snake_case = os.path.join(__lowerCAmelCase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
snake_case = shard
for key in shard:
snake_case = shard_file
# Add the metadata
snake_case = {"""total_size""": total_size}
snake_case = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , """w""" , encoding="""utf-8""" ) as f:
snake_case = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + """\n"""
f.write(__lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def __lowerCamelCase ( ) -> Optional[Any]:
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
snake_case = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" )
config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" )
snake_case = SwitchTransformersForConditionalGeneration.from_pretrained(
"""/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" )
snake_case = TaTokenizer.from_pretrained("""t5-small""" )
snake_case = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."""
snake_case = tokenizer(__lowerCAmelCase , return_tensors="""pt""" ).input_ids
snake_case = model.generate(__lowerCAmelCase , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 3 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __lowerCamelCase ( __lowerCAmelCase : dict ) -> tuple:
return (data["data"], data["target"])
def __lowerCamelCase ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ) -> XGBClassifier:
snake_case = XGBClassifier()
classifier.fit(__lowerCAmelCase , __lowerCAmelCase )
return classifier
def __lowerCamelCase ( ) -> None:
snake_case = load_iris()
snake_case , snake_case = data_handling(__lowerCAmelCase )
snake_case , snake_case , snake_case , snake_case = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 )
snake_case = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
snake_case = xgboost(__lowerCAmelCase , __lowerCAmelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , display_labels=__lowerCAmelCase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def __lowerCamelCase ( __lowerCAmelCase : Any ) -> int:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
def __lowerCamelCase ( __lowerCAmelCase : str ) -> int:
# word like '180' or '身高' or '神'
for char in word:
snake_case = ord(__lowerCAmelCase )
if not _is_chinese_char(__lowerCAmelCase ):
return 0
return 1
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> int:
snake_case = set()
for token in tokens:
snake_case = len(__lowerCAmelCase ) > 1 and is_chinese(__lowerCAmelCase )
if chinese_word:
word_set.add(__lowerCAmelCase )
snake_case = list(__lowerCAmelCase )
return word_list
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : set() ) -> Union[str, Any]:
if not chinese_word_set:
return bert_tokens
snake_case = max([len(__lowerCAmelCase ) for w in chinese_word_set] )
snake_case = bert_tokens
snake_case , snake_case = 0, len(__lowerCAmelCase )
while start < end:
snake_case = True
if is_chinese(bert_word[start] ):
snake_case = min(end - start , __lowerCAmelCase )
for i in range(__lowerCAmelCase , 1 , -1 ):
snake_case = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
snake_case = """##""" + bert_word[j]
snake_case = start + i
snake_case = False
break
if single_word:
start += 1
return bert_word
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : LTP , __lowerCAmelCase : BertTokenizer ) -> List[str]:
snake_case = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
snake_case = ltp_tokenizer.seg(lines[i : i + 1_00] )[0]
snake_case = [get_chinese_word(__lowerCAmelCase ) for r in res]
ltp_res.extend(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
snake_case = []
for i in range(0 , len(__lowerCAmelCase ) , 1_00 ):
snake_case = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=5_12 )
bert_res.extend(res["""input_ids"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
snake_case = []
for input_ids, chinese_word in zip(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = []
for id in input_ids:
snake_case = bert_tokenizer._convert_id_to_token(__lowerCAmelCase )
input_tokens.append(__lowerCAmelCase )
snake_case = add_sub_symbol(__lowerCAmelCase , __lowerCAmelCase )
snake_case = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__lowerCAmelCase ):
if token[:2] == "##":
snake_case = token[2:]
# save chinese tokens' pos
if len(__lowerCAmelCase ) == 1 and _is_chinese_char(ord(__lowerCAmelCase ) ):
ref_id.append(__lowerCAmelCase )
ref_ids.append(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
return ref_ids
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> int:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
snake_case = f.readlines()
snake_case = [line.strip() for line in data if len(__lowerCAmelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
snake_case = LTP(args.ltp ) # faster in GPU device
snake_case = BertTokenizer.from_pretrained(args.bert )
snake_case = prepare_ref(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
snake_case = [json.dumps(__lowerCAmelCase ) + """\n""" for ref in ref_ids]
f.writelines(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
_SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase ).text , """html.parser""" )
snake_case = soup.findAll("""h1""" )
snake_case = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCAmelCase , __lowerCAmelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 3 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , __snake_case : Any , __snake_case : str=1_00 , __snake_case : str=13 , __snake_case : str=30 , __snake_case : Optional[int]=2 , __snake_case : List[str]=3 , __snake_case : int=True , __snake_case : int=True , __snake_case : Tuple=32 , __snake_case : Union[str, Any]=4 , __snake_case : Optional[Any]=4 , __snake_case : Dict=37 , __snake_case : List[Any]="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : str=10 , __snake_case : List[Any]=0.02 , __snake_case : int=3 , __snake_case : Optional[Any]=None , __snake_case : Optional[Any]=[0, 1, 2, 3] , )-> str:
snake_case = parent
snake_case = 1_00
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = scope
snake_case = out_indices
snake_case = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case = (image_size // patch_size) ** 2
snake_case = num_patches + 1
def lowerCAmelCase ( self : List[str] )-> Dict:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase ( self : Any )-> Optional[Any]:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowerCAmelCase ( self : Dict , __snake_case : int , __snake_case : Optional[int] , __snake_case : Any , __snake_case : List[str] )-> Tuple:
snake_case = BeitModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Any , __snake_case : Any , __snake_case : str , __snake_case : int , __snake_case : Any )-> Optional[Any]:
snake_case = BeitForMaskedImageModeling(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCAmelCase ( self : str , __snake_case : str , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Tuple )-> Any:
snake_case = self.type_sequence_label_size
snake_case = BeitForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = BeitForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Tuple )-> Union[str, Any]:
snake_case = self.num_labels
snake_case = BeitForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def lowerCAmelCase ( self : str )-> Optional[Any]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowerCAmelCase ( self : int )-> str:
snake_case = BeitModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def lowerCAmelCase ( self : Optional[Any] )-> int:
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def lowerCAmelCase ( self : str )-> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowerCAmelCase ( self : Tuple )-> int:
pass
def lowerCAmelCase ( self : Tuple )-> Any:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def lowerCAmelCase ( self : Any )-> Union[str, Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__snake_case )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def lowerCAmelCase ( self : Any )-> Dict:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
def lowerCAmelCase ( self : Dict )-> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case )
def lowerCAmelCase ( self : str )-> int:
if not self.model_tester.is_training:
return
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__snake_case ), BeitForMaskedImageModeling]:
continue
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.train()
snake_case = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
snake_case = model(**__snake_case ).loss
loss.backward()
def lowerCAmelCase ( self : Tuple )-> List[str]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case = False
snake_case = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case = model_class(__snake_case )
model.gradient_checkpointing_enable()
model.to(__snake_case )
model.train()
snake_case = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
snake_case = model(**__snake_case ).loss
loss.backward()
def lowerCAmelCase ( self : int )-> Optional[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = _config_zero_init(__snake_case )
for model_class in self.all_model_classes:
snake_case = model_class(config=__snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCAmelCase ( self : List[Any] )-> int:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = BeitModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def __lowerCamelCase ( ) -> Optional[int]:
snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : Any )-> Optional[int]:
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Dict )-> str:
snake_case = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(__snake_case )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=__snake_case , return_tensors="""pt""" ).pixel_values.to(__snake_case )
# prepare bool_masked_pos
snake_case = torch.ones((1, 1_96) , dtype=torch.bool ).to(__snake_case )
# forward pass
with torch.no_grad():
snake_case = model(pixel_values=__snake_case , bool_masked_pos=__snake_case )
snake_case = outputs.logits
# verify the logits
snake_case = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , __snake_case )
snake_case = torch.tensor(
[[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] ).to(__snake_case )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , __snake_case , atol=1e-2 ) )
@slow
def lowerCAmelCase ( self : List[Any] )-> Union[str, Any]:
snake_case = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(__snake_case )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=__snake_case , return_tensors="""pt""" ).to(__snake_case )
# forward pass
with torch.no_grad():
snake_case = model(**__snake_case )
snake_case = outputs.logits
# verify the logits
snake_case = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , __snake_case )
snake_case = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(__snake_case )
self.assertTrue(torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 ) )
snake_case = 2_81
self.assertEqual(logits.argmax(-1 ).item() , __snake_case )
@slow
def lowerCAmelCase ( self : Tuple )-> int:
snake_case = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
__snake_case )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=__snake_case , return_tensors="""pt""" ).to(__snake_case )
# forward pass
with torch.no_grad():
snake_case = model(**__snake_case )
snake_case = outputs.logits
# verify the logits
snake_case = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , __snake_case )
snake_case = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(__snake_case )
self.assertTrue(torch.allclose(logits[0, :3] , __snake_case , atol=1e-4 ) )
snake_case = 23_96
self.assertEqual(logits.argmax(-1 ).item() , __snake_case )
@slow
def lowerCAmelCase ( self : Optional[int] )-> Union[str, Any]:
snake_case = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case = model.to(__snake_case )
snake_case = BeitImageProcessor(do_resize=__snake_case , size=6_40 , do_center_crop=__snake_case )
snake_case = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] )
snake_case = image_processor(images=__snake_case , return_tensors="""pt""" ).to(__snake_case )
# forward pass
with torch.no_grad():
snake_case = model(**__snake_case )
snake_case = outputs.logits
# verify the logits
snake_case = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , __snake_case )
snake_case = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
snake_case = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=__snake_case , )
else:
snake_case = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=__snake_case , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __snake_case , atol=1e-4 ) )
@slow
def lowerCAmelCase ( self : List[str] )-> Any:
snake_case = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
snake_case = model.to(__snake_case )
snake_case = BeitImageProcessor(do_resize=__snake_case , size=6_40 , do_center_crop=__snake_case )
snake_case = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] )
snake_case = image_processor(images=__snake_case , return_tensors="""pt""" ).to(__snake_case )
# forward pass
with torch.no_grad():
snake_case = model(**__snake_case )
snake_case = outputs.logits.detach().cpu()
snake_case = image_processor.post_process_semantic_segmentation(outputs=__snake_case , target_sizes=[(5_00, 3_00)] )
snake_case = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , __snake_case )
snake_case = image_processor.post_process_semantic_segmentation(outputs=__snake_case )
snake_case = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , __snake_case )
| 3 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __lowerCamelCase ( ) -> str:
snake_case = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
snake_case = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(__lowerCAmelCase )
# Let's go
snake_case = parser.parse_args()
if not hasattr(__lowerCAmelCase , """func""" ):
parser.print_help()
exit(1 )
# Run
snake_case = args.func(__lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 3 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : int=None )-> str:
snake_case = data
snake_case = previous
snake_case = next_node
def __str__( self : Union[str, Any] )-> str:
return f'''{self.data}'''
def lowerCAmelCase ( self : Tuple )-> int:
return self.data
def lowerCAmelCase ( self : str )-> str:
return self.next
def lowerCAmelCase ( self : Dict )-> Optional[int]:
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , __snake_case : List[Any] )-> List[str]:
snake_case = head
def __iter__( self : Optional[int] )-> Dict:
return self
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
if not self.current:
raise StopIteration
else:
snake_case = self.current.get_data()
snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] )-> str:
snake_case = None # First node in list
snake_case = None # Last node in list
def __str__( self : List[str] )-> Any:
snake_case = self.head
snake_case = []
while current is not None:
nodes.append(current.get_data() )
snake_case = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : Optional[Any] , __snake_case : int )-> Optional[Any]:
snake_case = self.head
while current:
if current.get_data() == value:
return True
snake_case = current.get_next()
return False
def __iter__( self : Dict )-> List[Any]:
return LinkedListIterator(self.head )
def lowerCAmelCase ( self : Tuple )-> int:
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase ( self : List[Any] , __snake_case : Node )-> None:
if self.head is None:
snake_case = node
snake_case = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase ( self : int , __snake_case : Node )-> None:
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> None:
snake_case = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.previous
if node.get_previous() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : Optional[int] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.next
if node.get_next() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : int )-> None:
snake_case = 1
snake_case = Node(__snake_case )
snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
snake_case = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> Node:
snake_case = self.head
while node:
if node.get_data() == item:
return node
snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase ( self : Any , __snake_case : Dict )-> Tuple:
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
snake_case = self.head.get_next()
if node == self.tail:
snake_case = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Node )-> None:
if node.get_next():
snake_case = node.previous
if node.get_previous():
snake_case = node.next
snake_case = None
snake_case = None
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
return self.head is None
def __lowerCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import os
def __lowerCamelCase ( ) -> int:
snake_case = os.path.join(os.path.dirname(__lowerCAmelCase ) , """num.txt""" )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 3 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __snake_case : Optional[int]=5_02_67 , __snake_case : List[Any]=10_24 , __snake_case : str=12 , __snake_case : Union[str, Any]=40_96 , __snake_case : List[Any]=16 , __snake_case : Tuple=12 , __snake_case : Tuple=40_96 , __snake_case : Union[str, Any]=16 , __snake_case : Any=0.0 , __snake_case : Dict=0.0 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=10_24 , __snake_case : int=0.1 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=0.02 , __snake_case : Any=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Tuple=1 , __snake_case : Tuple=0 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=True , __snake_case : Dict=2 , __snake_case : Any=2 , __snake_case : Any=False , __snake_case : Any=1_00 , __snake_case : Optional[Any]=8_00 , **__snake_case : List[Any] , )-> Optional[int]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case = use_prompt
snake_case = prompt_length
snake_case = prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 3 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] ) -> Dict:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __lowerCamelCase ( __lowerCAmelCase : Any ) -> Optional[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def __lowerCamelCase ( ) -> Any:
snake_case = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ) -> Optional[int]:
snake_case = """imagenet-1k-id2label.json"""
snake_case = 10_00
snake_case = """huggingface/label-files"""
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 20]
snake_case = [3, 12, 16]
snake_case = [1_92, 7_68, 10_24]
snake_case = CvtForImageClassification(__lowerCAmelCase )
snake_case = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case = image_size
snake_case = torch.load(__lowerCAmelCase , map_location=torch.device("""cpu""" ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(__lowerCAmelCase )
snake_case = list_of_state_dict + embeddings(__lowerCAmelCase )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(__lowerCAmelCase , __lowerCAmelCase )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Any )-> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase ( self : Dict )-> Tuple:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
try:
snake_case = tempfile.mktemp()
with open(__snake_case , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , __snake_case )
snake_case = AlbertTokenizer.from_pretrained(__snake_case )
finally:
os.remove(__snake_case )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , __snake_case )
snake_case = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def lowerCAmelCase ( self : Any )-> Dict:
# This test is for deprecated behavior and can be removed in v5
snake_case = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : List[str] )-> List[str]:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Any )-> List[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def lowerCAmelCase ( self : int )-> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = BertTokenizer(__snake_case )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__snake_case , repo_id="""test-tokenizer""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = BertTokenizer(__snake_case )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__snake_case , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase ( self : Optional[Any] )-> int:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = BertTokenizerFast.from_pretrained(__snake_case )
bert_tokenizer.save_pretrained(__snake_case )
snake_case = CustomTokenizerFast.from_pretrained(__snake_case )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
snake_case = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
snake_case = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''' , use_fast=__snake_case , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Dict:
snake_case = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def lowerCAmelCase ( self : str )-> Dict:
snake_case = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def lowerCAmelCase ( self : int )-> Optional[int]:
snake_case = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
snake_case = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def lowerCAmelCase ( self : Optional[int] )-> Optional[int]:
snake_case = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def lowerCAmelCase ( self : Optional[int] )-> List[Any]:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
snake_case = Trie()
snake_case = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__snake_case , ["""AB""", """C"""] )
| 3 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write("""{}""" )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 1 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_SCREAMING_SNAKE_CASE = [{"type": "code", "content": INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Optional[Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __lowerCamelCase ( __lowerCAmelCase : dict[int, list[int]] ) -> list[tuple[int, int]]:
snake_case = 0
snake_case = len(__lowerCAmelCase ) # No of vertices in graph
snake_case = [0] * n
snake_case = [False] * n
def dfs(__lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ):
snake_case = True
snake_case = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , id_ )
snake_case = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
snake_case = min(low[at] , low[to] )
snake_case = []
for i in range(__lowerCAmelCase ):
if not visited[i]:
dfs(__lowerCAmelCase , -1 , __lowerCAmelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["vqvae"]
def __init__( self : List[str] , __snake_case : AutoencoderKL , __snake_case : UNetaDConditionModel , __snake_case : Mel , __snake_case : Union[DDIMScheduler, DDPMScheduler] , )-> Optional[int]:
super().__init__()
self.register_modules(unet=__snake_case , scheduler=__snake_case , mel=__snake_case , vqvae=__snake_case )
def lowerCAmelCase ( self : Union[str, Any] )-> int:
return 50 if isinstance(self.scheduler , __snake_case ) else 10_00
@torch.no_grad()
def __call__( self : Tuple , __snake_case : int = 1 , __snake_case : str = None , __snake_case : np.ndarray = None , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : int = None , __snake_case : torch.Generator = None , __snake_case : float = 0 , __snake_case : float = 0 , __snake_case : torch.Generator = None , __snake_case : float = 0 , __snake_case : torch.Tensor = None , __snake_case : torch.Tensor = None , __snake_case : Optional[int]=True , )-> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
snake_case = steps or self.get_default_steps()
self.scheduler.set_timesteps(__snake_case )
snake_case = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
snake_case = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
snake_case = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=__snake_case , device=self.device , )
snake_case = noise
snake_case = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(__snake_case , __snake_case )
snake_case = self.mel.audio_slice_to_image(__snake_case )
snake_case = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
snake_case = (input_image / 2_55) * 2 - 1
snake_case = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
snake_case = self.vqvae.encode(torch.unsqueeze(__snake_case , 0 ) ).latent_dist.sample(
generator=__snake_case )[0]
snake_case = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
snake_case = self.scheduler.add_noise(__snake_case , __snake_case , self.scheduler.timesteps[start_step - 1] )
snake_case = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
snake_case = int(mask_start_secs * pixels_per_second )
snake_case = int(mask_end_secs * pixels_per_second )
snake_case = self.scheduler.add_noise(__snake_case , __snake_case , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , __snake_case ):
snake_case = self.unet(__snake_case , __snake_case , __snake_case )["""sample"""]
else:
snake_case = self.unet(__snake_case , __snake_case )["""sample"""]
if isinstance(self.scheduler , __snake_case ):
snake_case = self.scheduler.step(
model_output=__snake_case , timestep=__snake_case , sample=__snake_case , eta=__snake_case , generator=__snake_case , )["""prev_sample"""]
else:
snake_case = self.scheduler.step(
model_output=__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
snake_case = mask[:, step, :, :mask_start]
if mask_end > 0:
snake_case = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
snake_case = 1 / self.vqvae.config.scaling_factor * images
snake_case = self.vqvae.decode(__snake_case )["""sample"""]
snake_case = (images / 2 + 0.5).clamp(0 , 1 )
snake_case = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
snake_case = (images * 2_55).round().astype("""uint8""" )
snake_case = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(__snake_case , mode="""RGB""" ).convert("""L""" ) for _ in images) )
snake_case = [self.mel.image_to_audio(__snake_case ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(__snake_case )[:, np.newaxis, :] ) , **ImagePipelineOutput(__snake_case ) )
@torch.no_grad()
def lowerCAmelCase ( self : Tuple , __snake_case : List[Image.Image] , __snake_case : int = 50 )-> np.ndarray:
assert isinstance(self.scheduler , __snake_case )
self.scheduler.set_timesteps(__snake_case )
snake_case = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
snake_case = (sample / 2_55) * 2 - 1
snake_case = torch.Tensor(__snake_case ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
snake_case = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
snake_case = self.scheduler.alphas_cumprod[t]
snake_case = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
snake_case = 1 - alpha_prod_t
snake_case = self.unet(__snake_case , __snake_case )["""sample"""]
snake_case = (1 - alpha_prod_t_prev) ** 0.5 * model_output
snake_case = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
snake_case = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def lowerCAmelCase ( __snake_case : torch.Tensor , __snake_case : torch.Tensor , __snake_case : float )-> torch.Tensor:
snake_case = acos(torch.dot(torch.flatten(__snake_case ) , torch.flatten(__snake_case ) ) / torch.norm(__snake_case ) / torch.norm(__snake_case ) )
return sin((1 - alpha) * theta ) * xa / sin(__snake_case ) + sin(alpha * theta ) * xa / sin(__snake_case )
| 3 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> int:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ) -> List[str]:
snake_case = SEWConfig()
if is_finetuned:
snake_case = model.wav_encoder.wav_model.cfg
else:
snake_case = model.cfg
snake_case = fs_config.conv_bias
snake_case = eval(fs_config.conv_feature_layers )
snake_case = [x[0] for x in conv_layers]
snake_case = [x[1] for x in conv_layers]
snake_case = [x[2] for x in conv_layers]
snake_case = """gelu"""
snake_case = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
snake_case = 0.0
snake_case = fs_config.activation_fn.name
snake_case = fs_config.encoder_embed_dim
snake_case = 0.02
snake_case = fs_config.encoder_ffn_embed_dim
snake_case = 1e-5
snake_case = fs_config.encoder_layerdrop
snake_case = fs_config.encoder_attention_heads
snake_case = fs_config.conv_pos_groups
snake_case = fs_config.conv_pos
snake_case = len(__lowerCAmelCase )
snake_case = fs_config.encoder_layers
snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case = model.cfg
snake_case = fs_config.final_dropout
snake_case = fs_config.layerdrop
snake_case = fs_config.activation_dropout
snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case = fs_config.attention_dropout
snake_case = fs_config.dropout_input
snake_case = fs_config.dropout
snake_case = fs_config.mask_channel_length
snake_case = fs_config.mask_channel_prob
snake_case = fs_config.mask_length
snake_case = fs_config.mask_prob
snake_case = """Wav2Vec2FeatureExtractor"""
snake_case = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : str=True ) -> Any:
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = convert_config(model[0] , __lowerCAmelCase )
snake_case = model[0].eval()
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = SEWForCTC(__lowerCAmelCase )
else:
snake_case = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 3 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_bigbird_pegasus": [
"BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BigBirdPegasusConfig",
"BigBirdPegasusOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
"BigBirdPegasusForCausalLM",
"BigBirdPegasusForConditionalGeneration",
"BigBirdPegasusForQuestionAnswering",
"BigBirdPegasusForSequenceClassification",
"BigBirdPegasusModel",
"BigBirdPegasusPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaControlnetImgaImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 3 | 1 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int]=[] ) -> Tuple:
snake_case = size[0] - overlap_pixels * 2
snake_case = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
snake_case = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
snake_case = np.pad(__lowerCAmelCase , mode="""linear_ramp""" , pad_width=__lowerCAmelCase , end_values=0 )
if "l" in remove_borders:
snake_case = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
snake_case = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
snake_case = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
snake_case = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] ) -> Tuple:
return max(__lowerCAmelCase , min(__lowerCAmelCase , __lowerCAmelCase ) )
def __lowerCamelCase ( __lowerCAmelCase : [int] , __lowerCAmelCase : [int] , __lowerCAmelCase : [int] ) -> Dict:
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def __lowerCamelCase ( __lowerCAmelCase : [int] , __lowerCAmelCase : int , __lowerCAmelCase : [int] ) -> Optional[int]:
snake_case = list(__lowerCAmelCase )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
snake_case = clamp_rect(__lowerCAmelCase , [0, 0] , [image_size[0], image_size[1]] )
return rect
def __lowerCamelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ) -> str:
snake_case = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(__lowerCAmelCase , (original_slice, 0) )
return result
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Any:
snake_case = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
snake_case = tile.crop(__lowerCAmelCase )
return tile
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> Tuple:
snake_case = n % d
return n - divisor
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : List[Any] , __snake_case : AutoencoderKL , __snake_case : CLIPTextModel , __snake_case : CLIPTokenizer , __snake_case : UNetaDConditionModel , __snake_case : DDPMScheduler , __snake_case : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __snake_case : int = 3_50 , )-> str:
super().__init__(
vae=__snake_case , text_encoder=__snake_case , tokenizer=__snake_case , unet=__snake_case , low_res_scheduler=__snake_case , scheduler=__snake_case , max_noise_level=__snake_case , )
def lowerCAmelCase ( self : Any , __snake_case : int , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[str] , **__snake_case : Optional[Any] )-> Union[str, Any]:
torch.manual_seed(0 )
snake_case = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
snake_case = add_overlap_rect(__snake_case , __snake_case , image.size )
snake_case = image.crop(__snake_case )
snake_case = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
snake_case = translated_slice_x - (original_image_slice / 2)
snake_case = max(0 , __snake_case )
snake_case = squeeze_tile(__snake_case , __snake_case , __snake_case , __snake_case )
snake_case = to_input.size
snake_case = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
snake_case = super(__snake_case , self ).__call__(image=__snake_case , **__snake_case ).images[0]
snake_case = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
snake_case = unsqueeze_tile(__snake_case , __snake_case )
snake_case = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
snake_case = []
if x == 0:
remove_borders.append("""l""" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("""r""" )
if y == 0:
remove_borders.append("""t""" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("""b""" )
snake_case = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__snake_case ) , mode="""L""" , )
final_image.paste(
__snake_case , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __snake_case )
@torch.no_grad()
def __call__( self : List[Any] , __snake_case : Union[str, List[str]] , __snake_case : Union[PIL.Image.Image, List[PIL.Image.Image]] , __snake_case : int = 75 , __snake_case : float = 9.0 , __snake_case : int = 50 , __snake_case : Optional[Union[str, List[str]]] = None , __snake_case : Optional[int] = 1 , __snake_case : float = 0.0 , __snake_case : Optional[torch.Generator] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __snake_case : int = 1 , __snake_case : int = 1_28 , __snake_case : int = 32 , __snake_case : int = 32 , )-> List[Any]:
snake_case = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
snake_case = math.ceil(image.size[0] / tile_size )
snake_case = math.ceil(image.size[1] / tile_size )
snake_case = tcx * tcy
snake_case = 0
for y in range(__snake_case ):
for x in range(__snake_case ):
self._process_tile(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , prompt=__snake_case , num_inference_steps=__snake_case , guidance_scale=__snake_case , noise_level=__snake_case , negative_prompt=__snake_case , num_images_per_prompt=__snake_case , eta=__snake_case , generator=__snake_case , latents=__snake_case , )
current_count += 1
if callback is not None:
callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
return final_image
def __lowerCamelCase ( ) -> str:
# Run a demo
snake_case = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case = StableDiffusionTiledUpscalePipeline.from_pretrained(__lowerCAmelCase , revision="""fp16""" , torch_dtype=torch.floataa )
snake_case = pipe.to("""cuda""" )
snake_case = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
def callback(__lowerCAmelCase : str ):
print(F'''progress: {obj["progress"]:.4f}''' )
obj["image"].save("""diffusers_library_progress.jpg""" )
snake_case = pipe(image=__lowerCAmelCase , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__lowerCAmelCase )
final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ) -> list:
snake_case = len(__lowerCAmelCase )
snake_case = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
snake_case = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BartphoTokenizer
snake_case_ = False
snake_case_ = True
def lowerCAmelCase ( self : Any )-> List[str]:
super().setUp()
snake_case = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]
snake_case = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
snake_case = {"""unk_token""": """<unk>"""}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] )
with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
snake_case = BartphoTokenizer(__snake_case , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : List[str] , **__snake_case : int )-> str:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCAmelCase ( self : Tuple , __snake_case : Tuple )-> str:
snake_case = """This is a là test"""
snake_case = """This is a<unk><unk> test"""
return input_text, output_text
def lowerCAmelCase ( self : Optional[int] )-> str:
snake_case = BartphoTokenizer(__snake_case , self.monolingual_vocab_file , **self.special_tokens_map )
snake_case = """This is a là test"""
snake_case = """▁This ▁is ▁a ▁l à ▁t est""".split()
snake_case = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokens + [tokenizer.unk_token]
snake_case = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
| 3 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
| 3 | 1 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : dict ) -> str:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , """html.parser""" )
snake_case = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
snake_case = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple:
snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
snake_case = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict:
snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case = self.get_auto_remove_tmp_dir()
snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ["""run_translation.py"""] + args
with patch.object(__snake_case , """argv""" , __snake_case ):
main()
return output_dir
| 3 | 1 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
snake_case = 1.5
snake_case = int(factor * num_class_images )
snake_case = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCAmelCase , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=__lowerCAmelCase )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
snake_case = client.query(text=__lowerCAmelCase )
if len(__lowerCAmelCase ) >= factor * num_class_images or num_images > 1e4:
break
else:
snake_case = int(factor * num_images )
snake_case = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCAmelCase , aesthetic_weight=0.1 , )
snake_case = 0
snake_case = 0
snake_case = tqdm(desc="""downloading real regularization images""" , total=__lowerCAmelCase )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
snake_case = class_images[count]
count += 1
try:
snake_case = requests.get(images["""url"""] )
if img.status_code == 2_00:
snake_case = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __lowerCamelCase ( ) -> List[str]:
snake_case = argparse.ArgumentParser("""""" , add_help=__lowerCAmelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__lowerCAmelCase , type=__lowerCAmelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__lowerCAmelCase , type=__lowerCAmelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=2_00 , type=__lowerCAmelCase )
return parser.parse_args()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 3 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> int:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> str:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True ) -> List[Any]:
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 1 |
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
_SCREAMING_SNAKE_CASE = HfArgumentParser(InitializationArguments)
_SCREAMING_SNAKE_CASE = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
_SCREAMING_SNAKE_CASE = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
_SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 3 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class _lowerCAmelCase ( logging.LoggerAdapter ):
"""simple docstring"""
@staticmethod
def lowerCAmelCase ( __snake_case : Dict )-> Optional[Any]:
snake_case = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def lowerCAmelCase ( self : List[str] , __snake_case : Any , __snake_case : int , *__snake_case : List[str] , **__snake_case : int )-> Optional[Any]:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
snake_case = kwargs.pop("""main_process_only""" , __snake_case )
snake_case = kwargs.pop("""in_order""" , __snake_case )
if self.isEnabledFor(__snake_case ):
if self._should_log(__snake_case ):
snake_case , snake_case = self.process(__snake_case , __snake_case )
self.logger.log(__snake_case , __snake_case , *__snake_case , **__snake_case )
elif in_order:
snake_case = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
snake_case , snake_case = self.process(__snake_case , __snake_case )
self.logger.log(__snake_case , __snake_case , *__snake_case , **__snake_case )
state.wait_for_everyone()
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : str = None ) -> Union[str, Any]:
if log_level is None:
snake_case = os.environ.get("""ACCELERATE_LOG_LEVEL""" , __lowerCAmelCase )
snake_case = logging.getLogger(__lowerCAmelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(__lowerCAmelCase , {} )
| 3 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "Salesforce/blip-image-captioning-base"
snake_case_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case_ = "image_captioner"
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ["image"]
snake_case_ = ["text"]
def __init__( self : Tuple , *__snake_case : Optional[int] , **__snake_case : Any )-> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : str , __snake_case : "Image" )-> int:
return self.pre_processor(images=__snake_case , return_tensors="""pt""" )
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> Union[str, Any]:
return self.model.generate(**__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any )-> Dict:
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0].strip()
| 3 | 1 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = "Alexander Joslin"
import operator as op
from .stack import Stack
def __lowerCamelCase ( __lowerCAmelCase : str ) -> int:
snake_case = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
snake_case = Stack()
snake_case = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(__lowerCAmelCase )
elif i == ")":
# RULE 4
snake_case = operator_stack.peek()
operator_stack.pop()
snake_case = operand_stack.peek()
operand_stack.pop()
snake_case = operand_stack.peek()
operand_stack.pop()
snake_case = operators[opr](__lowerCAmelCase , __lowerCAmelCase )
operand_stack.push(__lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 3 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 3 | 1 |
'''simple docstring'''
from math import factorial, radians
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : int = 18 , __lowerCAmelCase : int = 10 ) -> float:
snake_case = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
snake_case = radians(__lowerCAmelCase )
snake_case = angle_in_radians
snake_case = 3
snake_case = -1
for _ in range(__lowerCAmelCase ):
result += (b * (angle_in_radians**a)) / factorial(__lowerCAmelCase )
snake_case = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
__import__("doctest").testmod()
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : dict ) -> str:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , """html.parser""" )
snake_case = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
snake_case = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CLIPTokenizer
snake_case_ = CLIPTokenizerFast
snake_case_ = True
snake_case_ = {}
snake_case_ = False
def lowerCAmelCase ( self : str )-> int:
super().setUp()
# fmt: off
snake_case = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
snake_case = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
snake_case = {"""unk_token""": """<unk>"""}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__snake_case ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__snake_case ) )
def lowerCAmelCase ( self : Union[str, Any] , **__snake_case : Dict )-> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCAmelCase ( self : Any , **__snake_case : Tuple )-> Optional[int]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def lowerCAmelCase ( self : Any , __snake_case : Optional[int] )-> Any:
snake_case = """lower newer"""
snake_case = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
snake_case = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case = """lower newer"""
snake_case = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
snake_case = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokens + [tokenizer.unk_token]
snake_case = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
@require_ftfy
def lowerCAmelCase ( self : List[str] )-> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case = self.tokenizer_class.from_pretrained(__snake_case , **__snake_case )
snake_case = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
snake_case = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
snake_case = """xa\u0303y""" + """ """ + """x\xe3y"""
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Test that the tokenization is identical on unicode of space type
snake_case = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Test that the tokenization is identical on unicode of line break type
snake_case = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
snake_case = tokenizer_s.tokenize(__snake_case )
snake_case = tokenizer_r.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> Any:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case = f'''{text_of_1_token} {text_of_1_token}'''
snake_case = self.rust_tokenizer_class.from_pretrained(
__snake_case , use_fast=__snake_case , )
snake_case = tokenizer_r(__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__snake_case ) + 1, len(__snake_case ) + 1 + len(__snake_case )) , )
snake_case = f''' {text}'''
snake_case = self.rust_tokenizer_class.from_pretrained(
__snake_case , use_fast=__snake_case , )
snake_case = tokenizer_r(__snake_case , return_offsets_mapping=__snake_case , add_special_tokens=__snake_case )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__snake_case )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__snake_case ) + 1, 1 + len(__snake_case ) + 1 + len(__snake_case )) , )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(__snake_case ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def lowerCAmelCase ( self : str )-> Optional[int]:
super().test_tokenization_python_rust_equals()
def lowerCAmelCase ( self : List[Any] )-> Dict:
# CLIP always lower cases letters
pass
| 3 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "WhisperFeatureExtractor"
snake_case_ = "WhisperTokenizer"
def __init__( self : Dict , __snake_case : Any , __snake_case : int )-> List[Any]:
super().__init__(__snake_case , __snake_case )
snake_case = self.feature_extractor
snake_case = False
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str=None , __snake_case : List[str]=None , __snake_case : int=True )-> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self : str , *__snake_case : Tuple , **__snake_case : Union[str, Any] )-> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
snake_case = kwargs.pop("""audio""" , __snake_case )
snake_case = kwargs.pop("""sampling_rate""" , __snake_case )
snake_case = kwargs.pop("""text""" , __snake_case )
if len(__snake_case ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
snake_case = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
snake_case = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings["""input_ids"""]
return inputs
def lowerCAmelCase ( self : Union[str, Any] , *__snake_case : Union[str, Any] , **__snake_case : str )-> Optional[Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : Any , **__snake_case : Union[str, Any] )-> List[str]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : Dict="np" )-> Any:
return self.tokenizer.get_prompt_ids(__snake_case , return_tensors=__snake_case )
| 3 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "unispeech"
def __init__( self : Union[str, Any] , __snake_case : List[Any]=32 , __snake_case : List[str]=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Union[str, Any]=12 , __snake_case : Tuple=30_72 , __snake_case : List[str]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : int=0.1 , __snake_case : str=0.1 , __snake_case : List[Any]=0.0 , __snake_case : Union[str, Any]=0.0 , __snake_case : Union[str, Any]=0.1 , __snake_case : str=0.1 , __snake_case : Tuple=0.02 , __snake_case : Tuple=1e-5 , __snake_case : Dict="group" , __snake_case : Optional[Any]="gelu" , __snake_case : Union[str, Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __snake_case : List[Any]=(5, 2, 2, 2, 2, 2, 2) , __snake_case : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , __snake_case : int=False , __snake_case : Tuple=1_28 , __snake_case : int=16 , __snake_case : Dict=False , __snake_case : str=True , __snake_case : Union[str, Any]=0.05 , __snake_case : Tuple=10 , __snake_case : List[str]=2 , __snake_case : Dict=0.0 , __snake_case : int=10 , __snake_case : Any=0 , __snake_case : Any=3_20 , __snake_case : List[str]=2 , __snake_case : Any=0.1 , __snake_case : Optional[int]=1_00 , __snake_case : Optional[int]=2_56 , __snake_case : Optional[int]=2_56 , __snake_case : Optional[Any]=0.1 , __snake_case : int="mean" , __snake_case : Union[str, Any]=False , __snake_case : List[Any]=False , __snake_case : Optional[int]=2_56 , __snake_case : str=80 , __snake_case : Optional[int]=0 , __snake_case : Optional[int]=1 , __snake_case : int=2 , __snake_case : Optional[int]=0.5 , **__snake_case : Optional[int] , )-> Optional[Any]:
super().__init__(**__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case )
snake_case = hidden_size
snake_case = feat_extract_norm
snake_case = feat_extract_activation
snake_case = list(__snake_case )
snake_case = list(__snake_case )
snake_case = list(__snake_case )
snake_case = conv_bias
snake_case = num_conv_pos_embeddings
snake_case = num_conv_pos_embedding_groups
snake_case = len(self.conv_dim )
snake_case = num_hidden_layers
snake_case = intermediate_size
snake_case = hidden_act
snake_case = num_attention_heads
snake_case = hidden_dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = feat_proj_dropout
snake_case = final_dropout
snake_case = layerdrop
snake_case = layer_norm_eps
snake_case = initializer_range
snake_case = num_ctc_classes
snake_case = vocab_size
snake_case = do_stable_layer_norm
snake_case = use_weighted_layer_sum
snake_case = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case = apply_spec_augment
snake_case = mask_time_prob
snake_case = mask_time_length
snake_case = mask_time_min_masks
snake_case = mask_feature_prob
snake_case = mask_feature_length
snake_case = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case = num_codevectors_per_group
snake_case = num_codevector_groups
snake_case = contrastive_logits_temperature
snake_case = feat_quantizer_dropout
snake_case = num_negatives
snake_case = codevector_dim
snake_case = proj_codevector_dim
snake_case = diversity_loss_weight
# ctc loss
snake_case = ctc_loss_reduction
snake_case = ctc_zero_infinity
# pretraining loss
snake_case = replace_prob
@property
def lowerCAmelCase ( self : str )-> List[str]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 1
for i in range(0 , len(__lowerCAmelCase ) ):
total *= numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 0
for i in range(0 , len(__lowerCAmelCase ) ):
total += numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Dict:
snake_case = RobertaPreLayerNormConfig.from_pretrained(
__lowerCAmelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
snake_case = torch.load(hf_hub_download(repo_id=__lowerCAmelCase , filename="""pytorch_model.bin""" ) )
snake_case = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
snake_case = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
snake_case = tensor_value
snake_case = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=__lowerCAmelCase , config=__lowerCAmelCase , state_dict=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
# convert tokenizer
snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint-repo",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 3 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] ) -> Dict:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __lowerCamelCase ( __lowerCAmelCase : Any ) -> Optional[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def __lowerCamelCase ( ) -> Any:
snake_case = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ) -> Optional[int]:
snake_case = """imagenet-1k-id2label.json"""
snake_case = 10_00
snake_case = """huggingface/label-files"""
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 20]
snake_case = [3, 12, 16]
snake_case = [1_92, 7_68, 10_24]
snake_case = CvtForImageClassification(__lowerCAmelCase )
snake_case = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case = image_size
snake_case = torch.load(__lowerCAmelCase , map_location=torch.device("""cpu""" ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(__lowerCAmelCase )
snake_case = list_of_state_dict + embeddings(__lowerCAmelCase )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(__lowerCAmelCase , __lowerCAmelCase )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 | 1 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_SCREAMING_SNAKE_CASE = "src/diffusers"
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_SCREAMING_SNAKE_CASE = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_SCREAMING_SNAKE_CASE = "\n{0} = None\n"
_SCREAMING_SNAKE_CASE = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
_SCREAMING_SNAKE_CASE = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Dict:
snake_case = _re_backend.findall(__lowerCAmelCase )
if len(__lowerCAmelCase ) == 0:
return None
return "_and_".join(__lowerCAmelCase )
def __lowerCamelCase ( ) -> Optional[Any]:
with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case = f.readlines()
# Get to the point we do the actual imports for type checking
snake_case = 0
snake_case = {}
# Go through the end of the file
while line_index < len(__lowerCAmelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
snake_case = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
snake_case = []
# Until we unindent, add backend objects to the list
while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1:
snake_case = lines[line_index]
snake_case = _re_single_line_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__lowerCAmelCase ) > 0:
snake_case = objects
else:
line_index += 1
return backend_specific_objects
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
if name.isupper():
return DUMMY_CONSTANT.format(__lowerCAmelCase )
elif name.islower():
return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase )
else:
return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any]=None ) -> Tuple:
if backend_specific_objects is None:
snake_case = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
snake_case = {}
for backend, objects in backend_specific_objects.items():
snake_case = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
snake_case = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] )
snake_case = dummy_file
return dummy_files
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any]=False ) -> Union[str, Any]:
snake_case = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
snake_case = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
snake_case = os.path.join(__lowerCAmelCase , """utils""" )
snake_case = {
backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' )
for backend in dummy_files.keys()
}
snake_case = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case = f.read()
else:
snake_case = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 3 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_SCREAMING_SNAKE_CASE = {
"openbmb/cpm-ant-10b": 1024,
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> str:
snake_case = collections.OrderedDict()
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as reader:
snake_case = reader.readlines()
for index, token in enumerate(__lowerCAmelCase ):
snake_case = token.rstrip("""\n""" )
snake_case = index
return vocab
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]=2_00 )-> List[str]:
snake_case = vocab
snake_case = unk_token
snake_case = max_input_chars_per_word
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> List[Any]:
snake_case = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case = 0
snake_case = []
while start < len(__snake_case ):
snake_case = len(__snake_case )
snake_case = None
while start < end:
snake_case = """""".join(chars[start:end] )
if substr in self.vocab:
snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
snake_case = end
return sub_tokens
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = False
def __init__( self : int , __snake_case : Tuple , __snake_case : Optional[int]="<d>" , __snake_case : int="</d>" , __snake_case : List[Any]="<s>" , __snake_case : List[str]="</s>" , __snake_case : str="<pad>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : str="</n>" , __snake_case : List[str]="</_>" , __snake_case : Union[str, Any]="left" , **__snake_case : Tuple , )-> Union[str, Any]:
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
snake_case = bod_token
snake_case = eod_token
snake_case = load_vocab(__snake_case )
snake_case = self.encoder[space_token]
snake_case = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase ( self : Optional[int] )-> List[Any]:
return self.encoder[self.bod_token]
@property
def lowerCAmelCase ( self : str )-> Tuple:
return self.encoder[self.eod_token]
@property
def lowerCAmelCase ( self : str )-> List[str]:
return self.encoder["\n"]
@property
def lowerCAmelCase ( self : List[Any] )-> int:
return len(self.encoder )
def lowerCAmelCase ( self : Any )-> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __snake_case : Any )-> Union[str, Any]:
snake_case = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowerCAmelCase ( self : str , __snake_case : Tuple , **__snake_case : Dict )-> Optional[int]:
snake_case = [i for i in token_ids if i >= 0]
snake_case = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Dict )-> Optional[int]:
return token in self.encoder
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] )-> str:
return "".join(__snake_case )
def lowerCAmelCase ( self : Tuple , __snake_case : int )-> Optional[int]:
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : str , __snake_case : List[Any] )-> str:
return self.decoder.get(__snake_case , self.unk_token )
def lowerCAmelCase ( self : int , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
if os.path.isdir(__snake_case ):
snake_case = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
snake_case = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
snake_case = 0
if " " in self.encoder:
snake_case = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
snake_case = self.encoder["""\n"""]
del self.encoder["\n"]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
snake_case = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : List[int] = None )-> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
| 3 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = IFPipeline
snake_case_ = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
snake_case_ = TEXT_TO_IMAGE_BATCH_PARAMS
snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCAmelCase ( self : Dict )-> Dict:
return self._get_dummy_components()
def lowerCAmelCase ( self : int , __snake_case : str , __snake_case : str=0 )-> Optional[Any]:
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase ( self : Optional[Any] )-> Dict:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def lowerCAmelCase ( self : Optional[Any] )-> Optional[int]:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowerCAmelCase ( self : str )-> Dict:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
self._test_save_load_local()
def lowerCAmelCase ( self : str )-> List[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : str )-> Tuple:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Optional[Any] )-> Tuple:
# if
snake_case = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
snake_case = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=__snake_case , tokenizer=__snake_case )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
snake_case , snake_case = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
snake_case = None
snake_case = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__snake_case , __snake_case , __snake_case , __snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
snake_case = IFImgaImgPipeline(**pipe_a.components )
snake_case = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__snake_case , __snake_case , __snake_case , __snake_case )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
snake_case = IFInpaintingPipeline(**pipe_a.components )
snake_case = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__snake_case , __snake_case , __snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : int )-> Optional[Any]:
# pipeline 1
_start_torch_memory_measurement()
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , num_inference_steps=2 , generator=__snake_case , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (64, 64, 3)
snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(__snake_case , __snake_case )
# pipeline 2
_start_torch_memory_measurement()
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
snake_case = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (2_56, 2_56, 3)
snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Dict )-> str:
# pipeline 1
_start_torch_memory_measurement()
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , num_inference_steps=2 , generator=__snake_case , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (64, 64, 3)
snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(__snake_case , __snake_case )
# pipeline 2
_start_torch_memory_measurement()
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__snake_case )
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
snake_case = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , original_image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (2_56, 2_56, 3)
snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowerCAmelCase ( self : Any , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : int )-> int:
# pipeline 1
_start_torch_memory_measurement()
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , mask_image=__snake_case , num_inference_steps=2 , generator=__snake_case , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (64, 64, 3)
snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(__snake_case , __snake_case )
# pipeline 2
_start_torch_memory_measurement()
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__snake_case )
snake_case = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(__snake_case )
snake_case = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(__snake_case )
snake_case = pipe_a(
prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , image=__snake_case , mask_image=__snake_case , original_image=__snake_case , generator=__snake_case , num_inference_steps=2 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (2_56, 2_56, 3)
snake_case = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(__snake_case , __snake_case )
def __lowerCamelCase ( ) -> str:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 3 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __lowerCamelCase ( __lowerCAmelCase : dict ) -> tuple:
return (data["data"], data["target"])
def __lowerCamelCase ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ) -> XGBClassifier:
snake_case = XGBClassifier()
classifier.fit(__lowerCAmelCase , __lowerCAmelCase )
return classifier
def __lowerCamelCase ( ) -> None:
snake_case = load_iris()
snake_case , snake_case = data_handling(__lowerCAmelCase )
snake_case , snake_case , snake_case , snake_case = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 )
snake_case = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
snake_case = xgboost(__lowerCAmelCase , __lowerCAmelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , display_labels=__lowerCAmelCase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 1 |
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
_SCREAMING_SNAKE_CASE = (720, 1280) # Height, Width
_SCREAMING_SNAKE_CASE = (0.4, 0.6) # if height or width lower than this scale, drop it.
_SCREAMING_SNAKE_CASE = 1 / 100
_SCREAMING_SNAKE_CASE = ""
_SCREAMING_SNAKE_CASE = ""
_SCREAMING_SNAKE_CASE = ""
_SCREAMING_SNAKE_CASE = 250
def __lowerCamelCase ( ) -> None:
snake_case , snake_case = get_dataset(__lowerCAmelCase , __lowerCAmelCase )
for index in range(__lowerCAmelCase ):
snake_case = random.sample(range(len(__lowerCAmelCase ) ) , 4 )
snake_case , snake_case , snake_case = update_image_and_anno(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , filter_scale=__lowerCAmelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
snake_case = random_chars(32 )
snake_case = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
snake_case = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
snake_case = []
for anno in new_annos:
snake_case = anno[3] - anno[1]
snake_case = anno[4] - anno[2]
snake_case = anno[1] + width / 2
snake_case = anno[2] + height / 2
snake_case = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__lowerCAmelCase )
with open(F'''{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> tuple[list, list]:
snake_case = []
snake_case = []
for label_file in glob.glob(os.path.join(__lowerCAmelCase , """*.txt""" ) ):
snake_case = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(__lowerCAmelCase ) as in_file:
snake_case = in_file.readlines()
snake_case = os.path.join(__lowerCAmelCase , F'''{label_name}.jpg''' )
snake_case = []
for obj_list in obj_lists:
snake_case = obj_list.rstrip("""\n""" ).split(""" """ )
snake_case = float(obj[1] ) - float(obj[3] ) / 2
snake_case = float(obj[2] ) - float(obj[4] ) / 2
snake_case = float(obj[1] ) + float(obj[3] ) / 2
snake_case = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowerCAmelCase )
labels.append(__lowerCAmelCase )
return img_paths, labels
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : list[int] , __lowerCAmelCase : tuple[int, int] , __lowerCAmelCase : tuple[float, float] , __lowerCAmelCase : float = 0.0 , ) -> tuple[list, list, str]:
snake_case = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
snake_case = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
snake_case = int(scale_x * output_size[1] )
snake_case = int(scale_y * output_size[0] )
snake_case = []
snake_case = []
for i, index in enumerate(__lowerCAmelCase ):
snake_case = all_img_list[index]
path_list.append(__lowerCAmelCase )
snake_case = all_annos[index]
snake_case = cva.imread(__lowerCAmelCase )
if i == 0: # top-left
snake_case = cva.resize(__lowerCAmelCase , (divid_point_x, divid_point_y) )
snake_case = img
for bbox in img_annos:
snake_case = bbox[1] * scale_x
snake_case = bbox[2] * scale_y
snake_case = bbox[3] * scale_x
snake_case = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
snake_case = cva.resize(__lowerCAmelCase , (output_size[1] - divid_point_x, divid_point_y) )
snake_case = img
for bbox in img_annos:
snake_case = scale_x + bbox[1] * (1 - scale_x)
snake_case = bbox[2] * scale_y
snake_case = scale_x + bbox[3] * (1 - scale_x)
snake_case = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
snake_case = cva.resize(__lowerCAmelCase , (divid_point_x, output_size[0] - divid_point_y) )
snake_case = img
for bbox in img_annos:
snake_case = bbox[1] * scale_x
snake_case = scale_y + bbox[2] * (1 - scale_y)
snake_case = bbox[3] * scale_x
snake_case = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
snake_case = cva.resize(
__lowerCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
snake_case = img
for bbox in img_annos:
snake_case = scale_x + bbox[1] * (1 - scale_x)
snake_case = scale_y + bbox[2] * (1 - scale_y)
snake_case = scale_x + bbox[3] * (1 - scale_x)
snake_case = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
snake_case = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __lowerCamelCase ( __lowerCAmelCase : int ) -> str:
assert number_char > 1, "The number of character should greater than 1"
snake_case = ascii_lowercase + digits
return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase ).text , """html.parser""" )
snake_case = soup.findAll("""h1""" )
snake_case = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCAmelCase , __lowerCAmelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 3 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
"processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GitForCausalLM",
"GitModel",
"GitPreTrainedModel",
"GitVisionModel",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "data2vec-vision"
def __init__( self : Union[str, Any] , __snake_case : str=7_68 , __snake_case : Dict=12 , __snake_case : List[Any]=12 , __snake_case : List[Any]=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : List[Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Tuple=0.02 , __snake_case : Union[str, Any]=1e-12 , __snake_case : Any=2_24 , __snake_case : Dict=16 , __snake_case : int=3 , __snake_case : str=False , __snake_case : List[Any]=False , __snake_case : str=False , __snake_case : Tuple=False , __snake_case : Tuple=0.1 , __snake_case : int=0.1 , __snake_case : int=True , __snake_case : Dict=[3, 5, 7, 11] , __snake_case : Optional[int]=[1, 2, 3, 6] , __snake_case : str=True , __snake_case : int=0.4 , __snake_case : Any=2_56 , __snake_case : str=1 , __snake_case : int=False , __snake_case : Tuple=2_55 , **__snake_case : List[str] , )-> Dict:
super().__init__(**__snake_case )
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = use_mask_token
snake_case = use_absolute_position_embeddings
snake_case = use_relative_position_bias
snake_case = use_shared_relative_position_bias
snake_case = layer_scale_init_value
snake_case = drop_path_rate
snake_case = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case = out_indices
snake_case = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case = use_auxiliary_head
snake_case = auxiliary_loss_weight
snake_case = auxiliary_channels
snake_case = auxiliary_num_convs
snake_case = auxiliary_concat_input
snake_case = semantic_loss_ignore_index
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = version.parse("1.11" )
@property
def lowerCAmelCase ( self : int )-> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase ( self : List[Any] )-> float:
return 1e-4
| 3 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : int=None )-> str:
snake_case = data
snake_case = previous
snake_case = next_node
def __str__( self : Union[str, Any] )-> str:
return f'''{self.data}'''
def lowerCAmelCase ( self : Tuple )-> int:
return self.data
def lowerCAmelCase ( self : str )-> str:
return self.next
def lowerCAmelCase ( self : Dict )-> Optional[int]:
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , __snake_case : List[Any] )-> List[str]:
snake_case = head
def __iter__( self : Optional[int] )-> Dict:
return self
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
if not self.current:
raise StopIteration
else:
snake_case = self.current.get_data()
snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] )-> str:
snake_case = None # First node in list
snake_case = None # Last node in list
def __str__( self : List[str] )-> Any:
snake_case = self.head
snake_case = []
while current is not None:
nodes.append(current.get_data() )
snake_case = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : Optional[Any] , __snake_case : int )-> Optional[Any]:
snake_case = self.head
while current:
if current.get_data() == value:
return True
snake_case = current.get_next()
return False
def __iter__( self : Dict )-> List[Any]:
return LinkedListIterator(self.head )
def lowerCAmelCase ( self : Tuple )-> int:
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase ( self : List[Any] , __snake_case : Node )-> None:
if self.head is None:
snake_case = node
snake_case = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase ( self : int , __snake_case : Node )-> None:
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> None:
snake_case = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.previous
if node.get_previous() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : Optional[int] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.next
if node.get_next() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : int )-> None:
snake_case = 1
snake_case = Node(__snake_case )
snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
snake_case = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> Node:
snake_case = self.head
while node:
if node.get_data() == item:
return node
snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase ( self : Any , __snake_case : Dict )-> Tuple:
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
snake_case = self.head.get_next()
if node == self.tail:
snake_case = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Node )-> None:
if node.get_next():
snake_case = node.previous
if node.get_previous():
snake_case = node.next
snake_case = None
snake_case = None
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
return self.head is None
def __lowerCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : str ) -> bool:
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __lowerCamelCase ( __lowerCAmelCase : str ) -> bool:
snake_case = credit_card_number
snake_case = 0
snake_case = len(__lowerCAmelCase ) - 2
for i in range(__lowerCAmelCase , -1 , -2 ):
# double the value of every second digit
snake_case = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
snake_case = cc_number[:i] + str(__lowerCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__lowerCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __lowerCamelCase ( __lowerCAmelCase : str ) -> bool:
snake_case = F'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(__lowerCAmelCase ) <= 16:
print(F'''{error_message} of its length.''' )
return False
if not validate_initial_digits(__lowerCAmelCase ):
print(F'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(__lowerCAmelCase ):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 3 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __snake_case : Optional[int]=5_02_67 , __snake_case : List[Any]=10_24 , __snake_case : str=12 , __snake_case : Union[str, Any]=40_96 , __snake_case : List[Any]=16 , __snake_case : Tuple=12 , __snake_case : Tuple=40_96 , __snake_case : Union[str, Any]=16 , __snake_case : Any=0.0 , __snake_case : Dict=0.0 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=10_24 , __snake_case : int=0.1 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=0.02 , __snake_case : Any=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Tuple=1 , __snake_case : Tuple=0 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=True , __snake_case : Dict=2 , __snake_case : Any=2 , __snake_case : Any=False , __snake_case : Any=1_00 , __snake_case : Optional[Any]=8_00 , **__snake_case : List[Any] , )-> Optional[int]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case = use_prompt
snake_case = prompt_length
snake_case = prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 3 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "sentencepiece.bpe.model"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
_SCREAMING_SNAKE_CASE = {
"camembert-base": 512,
}
_SCREAMING_SNAKE_CASE = "▁"
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __snake_case : List[Any] , __snake_case : Tuple="<s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Dict="</s>" , __snake_case : List[str]="<s>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : List[Any]="<pad>" , __snake_case : int="<mask>" , __snake_case : Dict=["<s>NOTUSED", "</s>NOTUSED"] , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : Tuple , )-> None:
# Mask token behave like a normal word, i.e. include the space before it
snake_case = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
snake_case = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
snake_case = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
snake_case = len(self.fairseq_tokens_to_ids )
snake_case = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
snake_case = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def lowerCAmelCase ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None )-> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case = [self.cls_token_id]
snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is None:
return [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1]
def lowerCAmelCase ( self : Optional[int] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None )-> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def lowerCAmelCase ( self : Tuple )-> Dict:
snake_case = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase ( self : Any , __snake_case : str )-> List[str]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any )-> List[str]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__snake_case ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : List[Any] )-> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Tuple )-> str:
snake_case = []
snake_case = """"""
snake_case = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__snake_case ) + token
snake_case = True
snake_case = []
else:
current_sub_tokens.append(__snake_case )
snake_case = False
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def __getstate__( self : Dict )-> str:
snake_case = self.__dict__.copy()
snake_case = None
return state
def __setstate__( self : Dict , __snake_case : Any )-> List[Any]:
snake_case = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case = {}
snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
snake_case = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
| 3 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 | 1 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : int=None )-> str:
snake_case = data
snake_case = previous
snake_case = next_node
def __str__( self : Union[str, Any] )-> str:
return f'''{self.data}'''
def lowerCAmelCase ( self : Tuple )-> int:
return self.data
def lowerCAmelCase ( self : str )-> str:
return self.next
def lowerCAmelCase ( self : Dict )-> Optional[int]:
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , __snake_case : List[Any] )-> List[str]:
snake_case = head
def __iter__( self : Optional[int] )-> Dict:
return self
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
if not self.current:
raise StopIteration
else:
snake_case = self.current.get_data()
snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] )-> str:
snake_case = None # First node in list
snake_case = None # Last node in list
def __str__( self : List[str] )-> Any:
snake_case = self.head
snake_case = []
while current is not None:
nodes.append(current.get_data() )
snake_case = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : Optional[Any] , __snake_case : int )-> Optional[Any]:
snake_case = self.head
while current:
if current.get_data() == value:
return True
snake_case = current.get_next()
return False
def __iter__( self : Dict )-> List[Any]:
return LinkedListIterator(self.head )
def lowerCAmelCase ( self : Tuple )-> int:
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase ( self : List[Any] , __snake_case : Node )-> None:
if self.head is None:
snake_case = node
snake_case = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase ( self : int , __snake_case : Node )-> None:
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> None:
snake_case = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.previous
if node.get_previous() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : Optional[int] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.next
if node.get_next() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : int )-> None:
snake_case = 1
snake_case = Node(__snake_case )
snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
snake_case = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> Node:
snake_case = self.head
while node:
if node.get_data() == item:
return node
snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase ( self : Any , __snake_case : Dict )-> Tuple:
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
snake_case = self.head.get_next()
if node == self.tail:
snake_case = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Node )-> None:
if node.get_next():
snake_case = node.previous
if node.get_previous():
snake_case = node.next
snake_case = None
snake_case = None
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
return self.head is None
def __lowerCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write("""{}""" )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 1 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __lowerCamelCase ( __lowerCAmelCase : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__lowerCAmelCase )
snake_case = """""".join(bin(__lowerCAmelCase )[2:].zfill(8 ) for byte in data )
snake_case = len(__lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
snake_case = B"""=""" * ((6 - len(__lowerCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__lowerCAmelCase ) % 6)
else:
snake_case = B""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def __lowerCamelCase ( __lowerCAmelCase : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
snake_case = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
snake_case = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
snake_case = encoded_data[:-padding]
snake_case = """""".join(
bin(B64_CHARSET.index(__lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
snake_case = """""".join(
bin(B64_CHARSET.index(__lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
snake_case = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__lowerCAmelCase ) , 8 )
]
return bytes(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Optional[Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __lowerCamelCase ( __lowerCAmelCase : dict[int, list[int]] ) -> list[tuple[int, int]]:
snake_case = 0
snake_case = len(__lowerCAmelCase ) # No of vertices in graph
snake_case = [0] * n
snake_case = [False] * n
def dfs(__lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ):
snake_case = True
snake_case = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , id_ )
snake_case = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
snake_case = min(low[at] , low[to] )
snake_case = []
for i in range(__lowerCAmelCase ):
if not visited[i]:
dfs(__lowerCAmelCase , -1 , __lowerCAmelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_SCREAMING_SNAKE_CASE = "Usage of script: script_name <size_of_canvas:int>"
_SCREAMING_SNAKE_CASE = [0] * 100 + [1] * 10
random.shuffle(choice)
def __lowerCamelCase ( __lowerCAmelCase : int ) -> list[list[bool]]:
snake_case = [[False for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )]
return canvas
def __lowerCamelCase ( __lowerCAmelCase : list[list[bool]] ) -> None:
for i, row in enumerate(__lowerCAmelCase ):
for j, _ in enumerate(__lowerCAmelCase ):
snake_case = bool(random.getrandbits(1 ) )
def __lowerCamelCase ( __lowerCAmelCase : list[list[bool]] ) -> list[list[bool]]:
snake_case = np.array(__lowerCAmelCase )
snake_case = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__lowerCAmelCase ):
for c, pt in enumerate(__lowerCAmelCase ):
snake_case = __judge_point(
__lowerCAmelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
snake_case = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
snake_case = current_canvas.tolist()
return return_canvas
def __lowerCamelCase ( __lowerCAmelCase : bool , __lowerCAmelCase : list[list[bool]] ) -> bool:
snake_case = 0
snake_case = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
snake_case = pt
if pt:
if alive < 2:
snake_case = False
elif alive == 2 or alive == 3:
snake_case = True
elif alive > 3:
snake_case = False
else:
if alive == 3:
snake_case = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_SCREAMING_SNAKE_CASE = int(sys.argv[1])
# main working structure of this module.
_SCREAMING_SNAKE_CASE = create_canvas(canvas_size)
seed(c)
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = plt.subplots()
fig.show()
_SCREAMING_SNAKE_CASE = ListedColormap(["w", "k"])
try:
while True:
_SCREAMING_SNAKE_CASE = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 3 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> int:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ) -> List[str]:
snake_case = SEWConfig()
if is_finetuned:
snake_case = model.wav_encoder.wav_model.cfg
else:
snake_case = model.cfg
snake_case = fs_config.conv_bias
snake_case = eval(fs_config.conv_feature_layers )
snake_case = [x[0] for x in conv_layers]
snake_case = [x[1] for x in conv_layers]
snake_case = [x[2] for x in conv_layers]
snake_case = """gelu"""
snake_case = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
snake_case = 0.0
snake_case = fs_config.activation_fn.name
snake_case = fs_config.encoder_embed_dim
snake_case = 0.02
snake_case = fs_config.encoder_ffn_embed_dim
snake_case = 1e-5
snake_case = fs_config.encoder_layerdrop
snake_case = fs_config.encoder_attention_heads
snake_case = fs_config.conv_pos_groups
snake_case = fs_config.conv_pos
snake_case = len(__lowerCAmelCase )
snake_case = fs_config.encoder_layers
snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case = model.cfg
snake_case = fs_config.final_dropout
snake_case = fs_config.layerdrop
snake_case = fs_config.activation_dropout
snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case = fs_config.attention_dropout
snake_case = fs_config.dropout_input
snake_case = fs_config.dropout
snake_case = fs_config.mask_channel_length
snake_case = fs_config.mask_channel_prob
snake_case = fs_config.mask_length
snake_case = fs_config.mask_prob
snake_case = """Wav2Vec2FeatureExtractor"""
snake_case = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : str=True ) -> Any:
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = convert_config(model[0] , __lowerCAmelCase )
snake_case = model[0].eval()
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = SEWForCTC(__lowerCAmelCase )
else:
snake_case = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 3 | 1 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaControlnetImgaImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 3 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "informer"
snake_case_ = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self : Tuple , __snake_case : Optional[int] = None , __snake_case : Optional[int] = None , __snake_case : str = "student_t" , __snake_case : str = "nll" , __snake_case : int = 1 , __snake_case : List[int] = None , __snake_case : Optional[Union[str, bool]] = "mean" , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : int = 0 , __snake_case : Optional[List[int]] = None , __snake_case : Optional[List[int]] = None , __snake_case : int = 64 , __snake_case : int = 32 , __snake_case : int = 32 , __snake_case : int = 2 , __snake_case : int = 2 , __snake_case : int = 2 , __snake_case : int = 2 , __snake_case : bool = True , __snake_case : str = "gelu" , __snake_case : float = 0.05 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : int = 1_00 , __snake_case : float = 0.02 , __snake_case : Tuple=True , __snake_case : str = "prob" , __snake_case : int = 5 , __snake_case : bool = True , **__snake_case : List[Any] , )-> int:
# time series specific configuration
snake_case = prediction_length
snake_case = context_length or prediction_length
snake_case = distribution_output
snake_case = loss
snake_case = input_size
snake_case = num_time_features
snake_case = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
snake_case = scaling
snake_case = num_dynamic_real_features
snake_case = num_static_real_features
snake_case = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
"""The cardinality should be a list of the same length as `num_static_categorical_features`""" )
snake_case = cardinality
else:
snake_case = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__snake_case ) != num_static_categorical_features:
raise ValueError(
"""The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
snake_case = embedding_dimension
else:
snake_case = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
snake_case = num_parallel_samples
# Transformer architecture configuration
snake_case = input_size * len(self.lags_sequence ) + self._number_of_features
snake_case = d_model
snake_case = encoder_attention_heads
snake_case = decoder_attention_heads
snake_case = encoder_ffn_dim
snake_case = decoder_ffn_dim
snake_case = encoder_layers
snake_case = decoder_layers
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = activation_function
snake_case = init_std
snake_case = use_cache
# Informer
snake_case = attention_type
snake_case = sampling_factor
snake_case = distil
super().__init__(is_encoder_decoder=__snake_case , **__snake_case )
@property
def lowerCAmelCase ( self : Optional[int] )-> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ) -> list:
snake_case = len(__lowerCAmelCase )
snake_case = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
snake_case = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = DistilBertTokenizer
snake_case_ = DistilBertTokenizerFast
snake_case_ = True
@slow
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
snake_case = tokenizer.encode("""sequence builders""" , add_special_tokens=__snake_case )
snake_case = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__snake_case )
snake_case = tokenizer.build_inputs_with_special_tokens(__snake_case )
snake_case = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 3 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
| 3 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "new-model"
if is_tf_available():
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = NewModelConfig
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : int )-> List[str]:
snake_case = """bert-base-cased"""
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
snake_case = """bert-base-cased"""
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelForPreTraining.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelForCausalLM.from_pretrained(__snake_case )
snake_case , snake_case = TFAutoModelForCausalLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : int )-> Any:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : str )-> List[Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelForMaskedLM.from_pretrained(__snake_case )
snake_case , snake_case = TFAutoModelForMaskedLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : int )-> Tuple:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case )
snake_case , snake_case = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> List[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelForSequenceClassification.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Union[str, Any] )-> str:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_tensorflow_probability
def lowerCAmelCase ( self : Tuple )-> List[str]:
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
snake_case = AutoConfig.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(__snake_case )
snake_case , snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(
__snake_case , output_loading_info=__snake_case )
self.assertIsNotNone(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowerCAmelCase ( self : int )-> Optional[Any]:
snake_case = TFAutoModelWithLMHead.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
snake_case = TFAutoModel.from_pretrained("""sgugger/funnel-random-tiny""" )
self.assertIsInstance(__snake_case , __snake_case )
snake_case = copy.deepcopy(model.config )
snake_case = ["""FunnelBaseModel"""]
snake_case = TFAutoModel.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
snake_case = TFAutoModel.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Any:
try:
AutoConfig.register("""new-model""" , __snake_case )
snake_case = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__snake_case ):
auto_class.register(__snake_case , __snake_case )
auto_class.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
auto_class.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = BertModelTester(self ).get_config()
snake_case = NewModelConfig(**tiny_config.to_dict() )
snake_case = auto_class.from_config(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__snake_case )
snake_case = auto_class.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def lowerCAmelCase ( self : str )-> str:
with self.assertRaisesRegex(
__snake_case , """bert-base is not a local folder and is not a valid model identifier""" ):
snake_case = TFAutoModel.from_pretrained("""bert-base""" )
def lowerCAmelCase ( self : Any )-> str:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = TFAutoModel.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : Optional[Any] )-> Any:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin""" , ):
snake_case = TFAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : Any )-> Dict:
with self.assertRaisesRegex(__snake_case , """Use `from_pt=True` to load this model""" ):
snake_case = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
# Make sure we have cached the model.
snake_case = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
snake_case = TFAutoModel.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
snake_case = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
with RequestCounter() as counter:
snake_case = TFAutoModel.from_pretrained("""ArthurZ/tiny-random-bert-sharded""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 3 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple:
snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
snake_case = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict:
snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case = self.get_auto_remove_tmp_dir()
snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ["""run_translation.py"""] + args
with patch.object(__snake_case , """argv""" , __snake_case ):
main()
return output_dir
| 3 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "bert"
def __init__( self : Union[str, Any] , __snake_case : Any=3_05_22 , __snake_case : List[str]=7_68 , __snake_case : int=12 , __snake_case : int=12 , __snake_case : Dict=30_72 , __snake_case : Any="gelu" , __snake_case : List[str]=0.1 , __snake_case : List[str]=0.1 , __snake_case : Optional[Any]=5_12 , __snake_case : Dict=2 , __snake_case : List[Any]=0.02 , __snake_case : int=1e-12 , __snake_case : Optional[Any]=0 , __snake_case : List[Any]="absolute" , __snake_case : List[Any]=True , __snake_case : List[Any]=None , **__snake_case : List[str] , )-> Optional[int]:
super().__init__(pad_token_id=__snake_case , **__snake_case )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 3 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> int:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> str:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True ) -> List[Any]:
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> float:
return round(float(moles / volume ) * nfactor )
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> float:
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> float:
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCamelCase ( __lowerCAmelCase : float , __lowerCAmelCase : float , __lowerCAmelCase : float ) -> float:
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "Salesforce/blip-image-captioning-base"
snake_case_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case_ = "image_captioner"
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ["image"]
snake_case_ = ["text"]
def __init__( self : Tuple , *__snake_case : Optional[int] , **__snake_case : Any )-> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : str , __snake_case : "Image" )-> int:
return self.pre_processor(images=__snake_case , return_tensors="""pt""" )
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> Union[str, Any]:
return self.model.generate(**__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any )-> Dict:
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0].strip()
| 3 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def __lowerCamelCase ( __lowerCAmelCase : int ) -> Optional[int]:
snake_case = SwinConfig(image_size=1_92 )
if "base" in model_name:
snake_case = 6
snake_case = 1_28
snake_case = (2, 2, 18, 2)
snake_case = (4, 8, 16, 32)
elif "large" in model_name:
snake_case = 12
snake_case = 1_92
snake_case = (2, 2, 18, 2)
snake_case = (6, 12, 24, 48)
else:
raise ValueError("""Model not supported, only supports base and large variants""" )
snake_case = window_size
snake_case = embed_dim
snake_case = depths
snake_case = num_heads
return config
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> Tuple:
if "encoder.mask_token" in name:
snake_case = name.replace("""encoder.mask_token""" , """embeddings.mask_token""" )
if "encoder.patch_embed.proj" in name:
snake_case = name.replace("""encoder.patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "encoder.patch_embed.norm" in name:
snake_case = name.replace("""encoder.patch_embed.norm""" , """embeddings.norm""" )
if "attn.proj" in name:
snake_case = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
snake_case = """layernorm.weight"""
if name == "encoder.norm.bias":
snake_case = """layernorm.bias"""
if "decoder" in name:
pass
else:
snake_case = """swin.""" + name
return name
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : str ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case = orig_state_dict.pop(__lowerCAmelCase )
if "attn_mask" in key:
pass
elif "qkv" in key:
snake_case = key.split(""".""" )
snake_case = int(key_split[2] )
snake_case = int(key_split[4] )
snake_case = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[
dim : dim * 2, :
]
snake_case = val[-dim:, :]
else:
snake_case = val[
:dim
]
snake_case = val[
dim : dim * 2
]
snake_case = val[
-dim:
]
else:
snake_case = val
return orig_state_dict
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ) -> Union[str, Any]:
snake_case = torch.load(__lowerCAmelCase , map_location="""cpu""" )["""model"""]
snake_case = get_swin_config(__lowerCAmelCase )
snake_case = SwinForMaskedImageModeling(__lowerCAmelCase )
model.eval()
snake_case = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
snake_case = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case = ViTImageProcessor(size={"""height""": 1_92, """width""": 1_92} )
snake_case = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
snake_case = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
with torch.no_grad():
snake_case = model(**__lowerCAmelCase ).logits
print(outputs.keys() )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print(F'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(F'''microsoft/{model_name}''' )
image_processor.push_to_hub(F'''microsoft/{model_name}''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 3 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 3 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0.0
snake_case_ = 1
snake_case_ = 1
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = jnp.floataa
def lowerCAmelCase ( self : Union[str, Any] )-> Tuple:
snake_case = []
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
snake_case = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
snake_case = resnets
snake_case = attentions
if self.add_downsample:
snake_case = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : Union[str, Any]=True )-> Union[str, Any]:
snake_case = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case = resnet(__snake_case , __snake_case , deterministic=__snake_case )
snake_case = attn(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
snake_case = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0.0
snake_case_ = 1
snake_case_ = True
snake_case_ = jnp.floataa
def lowerCAmelCase ( self : Tuple )-> int:
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=__snake_case , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
snake_case = resnets
if self.add_downsample:
snake_case = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , __snake_case : str , __snake_case : Dict , __snake_case : Union[str, Any]=True )-> Dict:
snake_case = ()
for resnet in self.resnets:
snake_case = resnet(__snake_case , __snake_case , deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
snake_case = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0.0
snake_case_ = 1
snake_case_ = 1
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = jnp.floataa
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = []
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case = self.prev_output_channel if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
snake_case = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
snake_case = resnets
snake_case = attentions
if self.add_upsample:
snake_case = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , __snake_case : int , __snake_case : List[str] , __snake_case : Dict , __snake_case : List[str] , __snake_case : Union[str, Any]=True )-> List[Any]:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case = res_hidden_states_tuple[-1]
snake_case = res_hidden_states_tuple[:-1]
snake_case = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case = resnet(__snake_case , __snake_case , deterministic=__snake_case )
snake_case = attn(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
snake_case = self.upsamplers_a(__snake_case )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 0.0
snake_case_ = 1
snake_case_ = True
snake_case_ = jnp.floataa
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = []
for i in range(self.num_layers ):
snake_case = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case = self.prev_output_channel if i == 0 else self.out_channels
snake_case = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
snake_case = resnets
if self.add_upsample:
snake_case = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : int , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Dict=True )-> Tuple:
for resnet in self.resnets:
# pop res hidden states
snake_case = res_hidden_states_tuple[-1]
snake_case = res_hidden_states_tuple[:-1]
snake_case = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case = resnet(__snake_case , __snake_case , deterministic=__snake_case )
if self.add_upsample:
snake_case = self.upsamplers_a(__snake_case )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 0.0
snake_case_ = 1
snake_case_ = 1
snake_case_ = False
snake_case_ = False
snake_case_ = jnp.floataa
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# there is always at least one resnet
snake_case = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case = []
for _ in range(self.num_layers ):
snake_case = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__snake_case )
snake_case = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__snake_case )
snake_case = resnets
snake_case = attentions
def __call__( self : List[str] , __snake_case : str , __snake_case : Dict , __snake_case : Optional[Any] , __snake_case : Dict=True )-> Union[str, Any]:
snake_case = self.resnets[0](__snake_case , __snake_case )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case = attn(__snake_case , __snake_case , deterministic=__snake_case )
snake_case = resnet(__snake_case , __snake_case , deterministic=__snake_case )
return hidden_states
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : dict ) -> str:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , """html.parser""" )
snake_case = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
snake_case = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
snake_case_ = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case_ = False
snake_case_ = False
def lowerCAmelCase ( self : Dict , __snake_case : Tuple , __snake_case : str , __snake_case : Tuple=False )-> str:
snake_case = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class in get_values(__snake_case ):
snake_case = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Tuple , __snake_case : List[Any] , __snake_case : str=13 , __snake_case : Optional[Any]=7 , __snake_case : Union[str, Any]=True , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : List[str]=True , __snake_case : int=99 , __snake_case : str=32 , __snake_case : List[Any]=32 , __snake_case : Tuple=2 , __snake_case : Union[str, Any]=4 , __snake_case : List[str]=37 , __snake_case : str="gelu" , __snake_case : Any=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Dict=5_12 , __snake_case : Optional[int]=16 , __snake_case : str=2 , __snake_case : int=0.02 , __snake_case : Any=3 , __snake_case : Tuple=4 , __snake_case : List[str]=None , )-> Tuple:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
snake_case = embedding_size
def lowerCAmelCase ( self : int )-> Optional[int]:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Dict , __snake_case : Tuple )-> Optional[int]:
snake_case = TFMobileBertModel(config=__snake_case )
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case = model(__snake_case )
snake_case = [input_ids, input_mask]
snake_case = model(__snake_case )
snake_case = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : str , __snake_case : str , __snake_case : int , __snake_case : List[Any] , __snake_case : int , __snake_case : str , __snake_case : List[str] , __snake_case : Optional[int] )-> Any:
snake_case = TFMobileBertForMaskedLM(config=__snake_case )
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Any , __snake_case : Any , __snake_case : int , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Tuple )-> Any:
snake_case = TFMobileBertForNextSentencePrediction(config=__snake_case )
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] , __snake_case : int , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[Any] )-> List[Any]:
snake_case = TFMobileBertForPreTraining(config=__snake_case )
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case = model(__snake_case )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase ( self : Tuple , __snake_case : List[Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : str , __snake_case : List[Any] , __snake_case : List[str] )-> Optional[int]:
snake_case = self.num_labels
snake_case = TFMobileBertForSequenceClassification(config=__snake_case )
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : int , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : int )-> Tuple:
snake_case = self.num_choices
snake_case = TFMobileBertForMultipleChoice(config=__snake_case )
snake_case = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
snake_case = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
snake_case = tf.tile(tf.expand_dims(__snake_case , 1 ) , (1, self.num_choices, 1) )
snake_case = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : str , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Any , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Optional[int] )-> List[str]:
snake_case = self.num_labels
snake_case = TFMobileBertForTokenClassification(config=__snake_case )
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Dict )-> int:
snake_case = TFMobileBertForQuestionAnswering(config=__snake_case )
snake_case = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case = model(__snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Optional[int] )-> Optional[Any]:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
def lowerCAmelCase ( self : Union[str, Any] )-> Tuple:
snake_case = TFMobileBertModelTest.TFMobileBertModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowerCAmelCase ( self : int )-> str:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Dict )-> Tuple:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__snake_case )
def lowerCAmelCase ( self : Any )-> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__snake_case )
def lowerCAmelCase ( self : int )-> str:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__snake_case )
def lowerCAmelCase ( self : Optional[Any] )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__snake_case )
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__snake_case )
@slow
def lowerCAmelCase ( self : Any )-> str:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
snake_case = TFMobileBertModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Optional[int] )-> Dict:
snake_case = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
snake_case = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case = model(__snake_case )[0]
snake_case = [1, 6, 3_05_22]
self.assertEqual(output.shape , __snake_case )
snake_case = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __snake_case , atol=1e-4 )
| 3 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "WhisperFeatureExtractor"
snake_case_ = "WhisperTokenizer"
def __init__( self : Dict , __snake_case : Any , __snake_case : int )-> List[Any]:
super().__init__(__snake_case , __snake_case )
snake_case = self.feature_extractor
snake_case = False
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str=None , __snake_case : List[str]=None , __snake_case : int=True )-> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self : str , *__snake_case : Tuple , **__snake_case : Union[str, Any] )-> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
snake_case = kwargs.pop("""audio""" , __snake_case )
snake_case = kwargs.pop("""sampling_rate""" , __snake_case )
snake_case = kwargs.pop("""text""" , __snake_case )
if len(__snake_case ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
snake_case = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
snake_case = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings["""input_ids"""]
return inputs
def lowerCAmelCase ( self : Union[str, Any] , *__snake_case : Union[str, Any] , **__snake_case : str )-> Optional[Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : Any , **__snake_case : Union[str, Any] )-> List[str]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : Dict="np" )-> Any:
return self.tokenizer.get_prompt_ids(__snake_case , return_tensors=__snake_case )
| 3 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["transformers", "torch", "note_seq"]
def __init__( self : Tuple , *__snake_case : Tuple , **__snake_case : int )-> Union[str, Any]:
requires_backends(self , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__snake_case : List[Any] , **__snake_case : Optional[int] )-> Any:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
@classmethod
def lowerCAmelCase ( cls : int , *__snake_case : Any , **__snake_case : int )-> int:
requires_backends(cls , ["""transformers""", """torch""", """note_seq"""] )
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 1
for i in range(0 , len(__lowerCAmelCase ) ):
total *= numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 0
for i in range(0 , len(__lowerCAmelCase ) ):
total += numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "markuplm"
def __init__( self : Union[str, Any] , __snake_case : List[Any]=3_05_22 , __snake_case : Tuple=7_68 , __snake_case : List[Any]=12 , __snake_case : Optional[Any]=12 , __snake_case : Dict=30_72 , __snake_case : Dict="gelu" , __snake_case : Dict=0.1 , __snake_case : str=0.1 , __snake_case : str=5_12 , __snake_case : Optional[int]=2 , __snake_case : str=0.02 , __snake_case : str=1e-12 , __snake_case : Tuple=0 , __snake_case : List[Any]=0 , __snake_case : Dict=2 , __snake_case : Any=2_56 , __snake_case : List[Any]=10_24 , __snake_case : Tuple=2_16 , __snake_case : int=10_01 , __snake_case : Tuple=32 , __snake_case : Tuple=50 , __snake_case : Dict="absolute" , __snake_case : List[Any]=True , __snake_case : List[Any]=None , **__snake_case : str , )-> Dict:
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
# additional properties
snake_case = max_depth
snake_case = max_xpath_tag_unit_embeddings
snake_case = max_xpath_subs_unit_embeddings
snake_case = tag_pad_id
snake_case = subs_pad_id
snake_case = xpath_unit_hidden_size
| 3 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] ) -> Dict:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __lowerCamelCase ( __lowerCAmelCase : Any ) -> Optional[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def __lowerCamelCase ( ) -> Any:
snake_case = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ) -> Optional[int]:
snake_case = """imagenet-1k-id2label.json"""
snake_case = 10_00
snake_case = """huggingface/label-files"""
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 20]
snake_case = [3, 12, 16]
snake_case = [1_92, 7_68, 10_24]
snake_case = CvtForImageClassification(__lowerCAmelCase )
snake_case = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case = image_size
snake_case = torch.load(__lowerCAmelCase , map_location=torch.device("""cpu""" ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(__lowerCAmelCase )
snake_case = list_of_state_dict + embeddings(__lowerCAmelCase )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(__lowerCAmelCase , __lowerCAmelCase )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 | 1 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "OwlViTImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , __snake_case : Optional[int]=None , __snake_case : Optional[int]=None , **__snake_case : int )-> List[str]:
snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __snake_case , )
snake_case = kwargs.pop("""feature_extractor""" )
snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__snake_case , __snake_case )
def __call__( self : List[Any] , __snake_case : int=None , __snake_case : List[str]=None , __snake_case : List[Any]=None , __snake_case : Any="max_length" , __snake_case : List[str]="np" , **__snake_case : int )-> Any:
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__snake_case , __snake_case ) or (isinstance(__snake_case , __snake_case ) and not isinstance(text[0] , __snake_case )):
snake_case = [self.tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case , **__snake_case )]
elif isinstance(__snake_case , __snake_case ) and isinstance(text[0] , __snake_case ):
snake_case = []
# Maximum number of queries across batch
snake_case = max([len(__snake_case ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__snake_case ) != max_num_queries:
snake_case = t + [""" """] * (max_num_queries - len(__snake_case ))
snake_case = self.tokenizer(__snake_case , padding=__snake_case , return_tensors=__snake_case , **__snake_case )
encodings.append(__snake_case )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
snake_case = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
snake_case = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
snake_case = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
snake_case = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
snake_case = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
snake_case = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
snake_case = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
snake_case = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
snake_case = BatchEncoding()
snake_case = input_ids
snake_case = attention_mask
if query_images is not None:
snake_case = BatchEncoding()
snake_case = self.image_processor(
__snake_case , return_tensors=__snake_case , **__snake_case ).pixel_values
snake_case = query_pixel_values
if images is not None:
snake_case = self.image_processor(__snake_case , return_tensors=__snake_case , **__snake_case )
if text is not None and images is not None:
snake_case = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
snake_case = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__snake_case ) , tensor_type=__snake_case )
def lowerCAmelCase ( self : List[Any] , *__snake_case : int , **__snake_case : Union[str, Any] )-> str:
return self.image_processor.post_process(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Dict , *__snake_case : Union[str, Any] , **__snake_case : Tuple )-> List[str]:
return self.image_processor.post_process_object_detection(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : List[str] , *__snake_case : str , **__snake_case : Union[str, Any] )-> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : List[Any] , **__snake_case : Union[str, Any] )-> Optional[Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : List[Any] , **__snake_case : List[Any] )-> Any:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
def lowerCAmelCase ( self : Any )-> Union[str, Any]:
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __snake_case , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : int )-> str:
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __snake_case , )
return self.image_processor
| 3 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_SCREAMING_SNAKE_CASE = {
"openbmb/cpm-ant-10b": 1024,
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> str:
snake_case = collections.OrderedDict()
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as reader:
snake_case = reader.readlines()
for index, token in enumerate(__lowerCAmelCase ):
snake_case = token.rstrip("""\n""" )
snake_case = index
return vocab
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]=2_00 )-> List[str]:
snake_case = vocab
snake_case = unk_token
snake_case = max_input_chars_per_word
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> List[Any]:
snake_case = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case = 0
snake_case = []
while start < len(__snake_case ):
snake_case = len(__snake_case )
snake_case = None
while start < end:
snake_case = """""".join(chars[start:end] )
if substr in self.vocab:
snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
snake_case = end
return sub_tokens
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = False
def __init__( self : int , __snake_case : Tuple , __snake_case : Optional[int]="<d>" , __snake_case : int="</d>" , __snake_case : List[Any]="<s>" , __snake_case : List[str]="</s>" , __snake_case : str="<pad>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : str="</n>" , __snake_case : List[str]="</_>" , __snake_case : Union[str, Any]="left" , **__snake_case : Tuple , )-> Union[str, Any]:
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
snake_case = bod_token
snake_case = eod_token
snake_case = load_vocab(__snake_case )
snake_case = self.encoder[space_token]
snake_case = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase ( self : Optional[int] )-> List[Any]:
return self.encoder[self.bod_token]
@property
def lowerCAmelCase ( self : str )-> Tuple:
return self.encoder[self.eod_token]
@property
def lowerCAmelCase ( self : str )-> List[str]:
return self.encoder["\n"]
@property
def lowerCAmelCase ( self : List[Any] )-> int:
return len(self.encoder )
def lowerCAmelCase ( self : Any )-> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __snake_case : Any )-> Union[str, Any]:
snake_case = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowerCAmelCase ( self : str , __snake_case : Tuple , **__snake_case : Dict )-> Optional[int]:
snake_case = [i for i in token_ids if i >= 0]
snake_case = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Dict )-> Optional[int]:
return token in self.encoder
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] )-> str:
return "".join(__snake_case )
def lowerCAmelCase ( self : Tuple , __snake_case : int )-> Optional[int]:
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : str , __snake_case : List[Any] )-> str:
return self.decoder.get(__snake_case , self.unk_token )
def lowerCAmelCase ( self : int , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
if os.path.isdir(__snake_case ):
snake_case = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
snake_case = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
snake_case = 0
if " " in self.encoder:
snake_case = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
snake_case = self.encoder["""\n"""]
del self.encoder["\n"]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
snake_case = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : List[int] = None )-> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list ) -> float:
_validate_point(__lowerCAmelCase )
_validate_point(__lowerCAmelCase )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(a - b ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
def __lowerCamelCase ( __lowerCAmelCase : list[float] ) -> None:
if point:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
for item in point:
if not isinstance(__lowerCAmelCase , (int, float) ):
snake_case = (
"""Expected a list of numbers as input, found """
F'''{type(__lowerCAmelCase ).__name__}'''
)
raise TypeError(__lowerCAmelCase )
else:
snake_case = F'''Expected a list of numbers as input, found {type(__lowerCAmelCase ).__name__}'''
raise TypeError(__lowerCAmelCase )
else:
raise ValueError("""Missing an input""" )
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list ) -> float:
_validate_point(__lowerCAmelCase )
_validate_point(__lowerCAmelCase )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError("""Both points must be in the same n-dimensional space""" )
return float(sum(abs(x - y ) for x, y in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __lowerCamelCase ( __lowerCAmelCase : dict ) -> tuple:
return (data["data"], data["target"])
def __lowerCamelCase ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ) -> XGBClassifier:
snake_case = XGBClassifier()
classifier.fit(__lowerCAmelCase , __lowerCAmelCase )
return classifier
def __lowerCamelCase ( ) -> None:
snake_case = load_iris()
snake_case , snake_case = data_handling(__lowerCAmelCase )
snake_case , snake_case , snake_case , snake_case = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 )
snake_case = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
snake_case = xgboost(__lowerCAmelCase , __lowerCAmelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , display_labels=__lowerCAmelCase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 1 |
'''simple docstring'''
import operator as op
_SCREAMING_SNAKE_CASE = "scaler.pt"
_SCREAMING_SNAKE_CASE = "pytorch_model"
_SCREAMING_SNAKE_CASE = "random_states"
_SCREAMING_SNAKE_CASE = "optimizer"
_SCREAMING_SNAKE_CASE = "scheduler"
_SCREAMING_SNAKE_CASE = "pytorch_model.bin"
_SCREAMING_SNAKE_CASE = "pytorch_model.bin.index.json"
_SCREAMING_SNAKE_CASE = "model.safetensors"
_SCREAMING_SNAKE_CASE = "model.safetensors.index.json"
_SCREAMING_SNAKE_CASE = "1.10.2"
_SCREAMING_SNAKE_CASE = "py38"
_SCREAMING_SNAKE_CASE = "4.17.0"
_SCREAMING_SNAKE_CASE = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
_SCREAMING_SNAKE_CASE = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
_SCREAMING_SNAKE_CASE = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
_SCREAMING_SNAKE_CASE = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
_SCREAMING_SNAKE_CASE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
_SCREAMING_SNAKE_CASE = "2.0.1"
_SCREAMING_SNAKE_CASE = ["pdsh", "standard", "openmpi", "mvapich"]
_SCREAMING_SNAKE_CASE = ["default", "reduce-overhead", "max-autotune"]
_SCREAMING_SNAKE_CASE = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_SCREAMING_SNAKE_CASE = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
_SCREAMING_SNAKE_CASE = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
_SCREAMING_SNAKE_CASE = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase ).text , """html.parser""" )
snake_case = soup.findAll("""h1""" )
snake_case = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCAmelCase , __lowerCAmelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 3 | 1 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
from .unet_ad import UNetaDModel
from .unet_ad import UNetaDModel
from .unet_ad_condition import UNetaDConditionModel
from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 3 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : list ) -> bool:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(__lowerCAmelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
if len(__lowerCAmelCase ) == 1:
return True
snake_case = series[1] - series[0]
for index in range(len(__lowerCAmelCase ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def __lowerCamelCase ( __lowerCAmelCase : list ) -> float:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""Input series is not valid, valid series - [2, 4, 6]""" )
if len(__lowerCAmelCase ) == 0:
raise ValueError("""Input list must be a non empty list""" )
snake_case = 0
for val in series:
answer += val
return answer / len(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : int=None )-> str:
snake_case = data
snake_case = previous
snake_case = next_node
def __str__( self : Union[str, Any] )-> str:
return f'''{self.data}'''
def lowerCAmelCase ( self : Tuple )-> int:
return self.data
def lowerCAmelCase ( self : str )-> str:
return self.next
def lowerCAmelCase ( self : Dict )-> Optional[int]:
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , __snake_case : List[Any] )-> List[str]:
snake_case = head
def __iter__( self : Optional[int] )-> Dict:
return self
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
if not self.current:
raise StopIteration
else:
snake_case = self.current.get_data()
snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] )-> str:
snake_case = None # First node in list
snake_case = None # Last node in list
def __str__( self : List[str] )-> Any:
snake_case = self.head
snake_case = []
while current is not None:
nodes.append(current.get_data() )
snake_case = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : Optional[Any] , __snake_case : int )-> Optional[Any]:
snake_case = self.head
while current:
if current.get_data() == value:
return True
snake_case = current.get_next()
return False
def __iter__( self : Dict )-> List[Any]:
return LinkedListIterator(self.head )
def lowerCAmelCase ( self : Tuple )-> int:
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase ( self : List[Any] , __snake_case : Node )-> None:
if self.head is None:
snake_case = node
snake_case = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase ( self : int , __snake_case : Node )-> None:
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> None:
snake_case = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.previous
if node.get_previous() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : Optional[int] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.next
if node.get_next() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : int )-> None:
snake_case = 1
snake_case = Node(__snake_case )
snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
snake_case = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> Node:
snake_case = self.head
while node:
if node.get_data() == item:
return node
snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase ( self : Any , __snake_case : Dict )-> Tuple:
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
snake_case = self.head.get_next()
if node == self.tail:
snake_case = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Node )-> None:
if node.get_next():
snake_case = node.previous
if node.get_previous():
snake_case = node.next
snake_case = None
snake_case = None
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
return self.head is None
def __lowerCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_SCREAMING_SNAKE_CASE = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
_SCREAMING_SNAKE_CASE = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
_SCREAMING_SNAKE_CASE = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def lowerCAmelCase ( self : Any )-> List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
def lowerCAmelCase ( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Optional[int]=None , __snake_case : Optional[int]=True , __snake_case : Dict=False )-> Any:
if rouge_types is None:
snake_case = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
snake_case = rouge_scorer.RougeScorer(rouge_types=__snake_case , use_stemmer=__snake_case )
if use_aggregator:
snake_case = scoring.BootstrapAggregator()
else:
snake_case = []
for ref, pred in zip(__snake_case , __snake_case ):
snake_case = scorer.score(__snake_case , __snake_case )
if use_aggregator:
aggregator.add_scores(__snake_case )
else:
scores.append(__snake_case )
if use_aggregator:
snake_case = aggregator.aggregate()
else:
snake_case = {}
for key in scores[0]:
snake_case = [score[key] for score in scores]
return result
| 3 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __snake_case : Optional[int]=5_02_67 , __snake_case : List[Any]=10_24 , __snake_case : str=12 , __snake_case : Union[str, Any]=40_96 , __snake_case : List[Any]=16 , __snake_case : Tuple=12 , __snake_case : Tuple=40_96 , __snake_case : Union[str, Any]=16 , __snake_case : Any=0.0 , __snake_case : Dict=0.0 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=10_24 , __snake_case : int=0.1 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=0.02 , __snake_case : Any=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Tuple=1 , __snake_case : Tuple=0 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=True , __snake_case : Dict=2 , __snake_case : Any=2 , __snake_case : Any=False , __snake_case : Any=1_00 , __snake_case : Optional[Any]=8_00 , **__snake_case : List[Any] , )-> Optional[int]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case = use_prompt
snake_case = prompt_length
snake_case = prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 3 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : Union[str, Any]=0.0 , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : str = "layer_norm" , __snake_case : bool = False , )-> Dict:
super().__init__()
snake_case = only_cross_attention
snake_case = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
snake_case = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case = AdaLayerNorm(__snake_case , __snake_case )
elif self.use_ada_layer_norm_zero:
snake_case = AdaLayerNormZero(__snake_case , __snake_case )
else:
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
snake_case = Attention(
query_dim=__snake_case , heads=__snake_case , dim_head=__snake_case , dropout=__snake_case , bias=__snake_case , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__snake_case , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case = (
AdaLayerNorm(__snake_case , __snake_case )
if self.use_ada_layer_norm
else nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
)
snake_case = Attention(
query_dim=__snake_case , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__snake_case , dim_head=__snake_case , dropout=__snake_case , bias=__snake_case , upcast_attention=__snake_case , ) # is self-attn if encoder_hidden_states is none
else:
snake_case = None
snake_case = None
# 3. Feed-forward
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
snake_case = FeedForward(__snake_case , dropout=__snake_case , activation_fn=__snake_case , final_dropout=__snake_case )
# let chunk size default to None
snake_case = None
snake_case = 0
def lowerCAmelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : int )-> List[str]:
# Sets chunk feed-forward
snake_case = chunk_size
snake_case = dim
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : torch.FloatTensor , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.LongTensor] = None , __snake_case : Dict[str, Any] = None , __snake_case : Optional[torch.LongTensor] = None , )-> List[str]:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case = self.norma(__snake_case , __snake_case )
elif self.use_ada_layer_norm_zero:
snake_case , snake_case , snake_case , snake_case , snake_case = self.norma(
__snake_case , __snake_case , __snake_case , hidden_dtype=hidden_states.dtype )
else:
snake_case = self.norma(__snake_case )
snake_case = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case = self.attna(
__snake_case , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__snake_case , **__snake_case , )
if self.use_ada_layer_norm_zero:
snake_case = gate_msa.unsqueeze(1 ) * attn_output
snake_case = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case = (
self.norma(__snake_case , __snake_case ) if self.use_ada_layer_norm else self.norma(__snake_case )
)
snake_case = self.attna(
__snake_case , encoder_hidden_states=__snake_case , attention_mask=__snake_case , **__snake_case , )
snake_case = attn_output + hidden_states
# 3. Feed-forward
snake_case = self.norma(__snake_case )
if self.use_ada_layer_norm_zero:
snake_case = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
snake_case = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case = torch.cat(
[self.ff(__snake_case ) for hid_slice in norm_hidden_states.chunk(__snake_case , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case = self.ff(__snake_case )
if self.use_ada_layer_norm_zero:
snake_case = gate_mlp.unsqueeze(1 ) * ff_output
snake_case = ff_output + hidden_states
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __snake_case : int , __snake_case : Optional[int] = None , __snake_case : int = 4 , __snake_case : float = 0.0 , __snake_case : str = "geglu" , __snake_case : bool = False , )-> Tuple:
super().__init__()
snake_case = int(dim * mult )
snake_case = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case = GELU(__snake_case , __snake_case )
if activation_fn == "gelu-approximate":
snake_case = GELU(__snake_case , __snake_case , approximate="""tanh""" )
elif activation_fn == "geglu":
snake_case = GEGLU(__snake_case , __snake_case )
elif activation_fn == "geglu-approximate":
snake_case = ApproximateGELU(__snake_case , __snake_case )
snake_case = nn.ModuleList([] )
# project in
self.net.append(__snake_case )
# project dropout
self.net.append(nn.Dropout(__snake_case ) )
# project out
self.net.append(nn.Linear(__snake_case , __snake_case ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(__snake_case ) )
def lowerCAmelCase ( self : List[str] , __snake_case : Any )-> Optional[int]:
for module in self.net:
snake_case = module(__snake_case )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , __snake_case : int , __snake_case : int , __snake_case : str = "none" )-> Optional[Any]:
super().__init__()
snake_case = nn.Linear(__snake_case , __snake_case )
snake_case = approximate
def lowerCAmelCase ( self : Optional[Any] , __snake_case : Union[str, Any] )-> str:
if gate.device.type != "mps":
return F.gelu(__snake_case , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def lowerCAmelCase ( self : List[str] , __snake_case : List[Any] )-> Optional[Any]:
snake_case = self.proj(__snake_case )
snake_case = self.gelu(__snake_case )
return hidden_states
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : int )-> Union[str, Any]:
super().__init__()
snake_case = nn.Linear(__snake_case , dim_out * 2 )
def lowerCAmelCase ( self : Optional[int] , __snake_case : Optional[Any] )-> int:
if gate.device.type != "mps":
return F.gelu(__snake_case )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def lowerCAmelCase ( self : Any , __snake_case : Union[str, Any] )-> Tuple:
snake_case , snake_case = self.proj(__snake_case ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__snake_case )
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : int , __snake_case : int )-> List[str]:
super().__init__()
snake_case = nn.Linear(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] , __snake_case : Optional[Any] )-> Any:
snake_case = self.proj(__snake_case )
return x * torch.sigmoid(1.7_02 * x )
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , __snake_case : int , __snake_case : int )-> str:
super().__init__()
snake_case = nn.Embedding(__snake_case , __snake_case )
snake_case = nn.SiLU()
snake_case = nn.Linear(__snake_case , embedding_dim * 2 )
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : List[str] )-> Tuple:
snake_case = self.linear(self.silu(self.emb(__snake_case ) ) )
snake_case , snake_case = torch.chunk(__snake_case , 2 )
snake_case = self.norm(__snake_case ) * (1 + scale) + shift
return x
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : str , __snake_case : Union[str, Any] )-> Any:
super().__init__()
snake_case = CombinedTimestepLabelEmbeddings(__snake_case , __snake_case )
snake_case = nn.SiLU()
snake_case = nn.Linear(__snake_case , 6 * embedding_dim , bias=__snake_case )
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case , eps=1e-6 )
def lowerCAmelCase ( self : str , __snake_case : Dict , __snake_case : int , __snake_case : int , __snake_case : Union[str, Any]=None )-> Optional[int]:
snake_case = self.linear(self.silu(self.emb(__snake_case , __snake_case , hidden_dtype=__snake_case ) ) )
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = emb.chunk(6 , dim=1 )
snake_case = self.norm(__snake_case ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : Optional[str] = None , __snake_case : float = 1e-5 )-> List[Any]:
super().__init__()
snake_case = num_groups
snake_case = eps
if act_fn is None:
snake_case = None
else:
snake_case = get_activation(__snake_case )
snake_case = nn.Linear(__snake_case , out_dim * 2 )
def lowerCAmelCase ( self : Dict , __snake_case : Optional[Any] , __snake_case : Tuple )-> Optional[Any]:
if self.act:
snake_case = self.act(__snake_case )
snake_case = self.linear(__snake_case )
snake_case = emb[:, :, None, None]
snake_case , snake_case = emb.chunk(2 , dim=1 )
snake_case = F.group_norm(__snake_case , self.num_groups , eps=self.eps )
snake_case = x * (1 + scale) + shift
return x
| 3 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Any ) -> Union[str, Any]:
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" )
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
if dist[i][j] != float("""inf""" ):
print(int(dist[i][j] ) , end="""\t""" )
else:
print("""INF""" , end="""\t""" )
print()
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Dict:
snake_case = [[float("""inf""" ) for _ in range(__lowerCAmelCase )] for _ in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
for j in range(__lowerCAmelCase ):
snake_case = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(__lowerCAmelCase ):
# looping through rows of graph array
for i in range(__lowerCAmelCase ):
# looping through columns of graph array
for j in range(__lowerCAmelCase ):
if (
dist[i][k] != float("""inf""" )
and dist[k][j] != float("""inf""" )
and dist[i][k] + dist[k][j] < dist[i][j]
):
snake_case = dist[i][k] + dist[k][j]
_print_dist(__lowerCAmelCase , __lowerCAmelCase )
return dist, v
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = int(input("Enter number of vertices: "))
_SCREAMING_SNAKE_CASE = int(input("Enter number of edges: "))
_SCREAMING_SNAKE_CASE = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
_SCREAMING_SNAKE_CASE = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
_SCREAMING_SNAKE_CASE = int(input("Enter source:"))
_SCREAMING_SNAKE_CASE = int(input("Enter destination:"))
_SCREAMING_SNAKE_CASE = float(input("Enter weight:"))
_SCREAMING_SNAKE_CASE = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 3 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write("""{}""" )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Optional[Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __lowerCamelCase ( __lowerCAmelCase : dict[int, list[int]] ) -> list[tuple[int, int]]:
snake_case = 0
snake_case = len(__lowerCAmelCase ) # No of vertices in graph
snake_case = [0] * n
snake_case = [False] * n
def dfs(__lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ):
snake_case = True
snake_case = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , id_ )
snake_case = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
snake_case = min(low[at] , low[to] )
snake_case = []
for i in range(__lowerCAmelCase ):
if not visited[i]:
dfs(__lowerCAmelCase , -1 , __lowerCAmelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : torch.nn.Module , __lowerCAmelCase : BnbQuantizationConfig , __lowerCAmelCase : Union[str, os.PathLike] = None , __lowerCAmelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None , __lowerCAmelCase : Optional[Union[str, os.PathLike]] = None , __lowerCAmelCase : bool = False , ) -> Dict:
snake_case = bnb_quantization_config.load_in_abit
snake_case = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"""You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"""
""" make sure you have the latest version of `bitsandbytes` installed.""" )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"""You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"""
"""make sure you have the latest version of `bitsandbytes` installed.""" )
snake_case = []
# custom device map
if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(device_map.keys() ) > 1:
snake_case = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
snake_case = get_keys_to_not_convert(__lowerCAmelCase )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__lowerCAmelCase )
snake_case = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
snake_case = []
snake_case = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__lowerCAmelCase )
# compatibility with peft
snake_case = load_in_abit
snake_case = load_in_abit
snake_case = get_parameter_device(__lowerCAmelCase )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"""It is not recommended to quantize a loaded model. """
"""The model should be instantiated under the `init_empty_weights` context manager.""" )
snake_case = replace_with_bnb_layers(__lowerCAmelCase , __lowerCAmelCase , modules_to_not_convert=__lowerCAmelCase )
# convert param to the right dtype
snake_case = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
snake_case = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" )
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__lowerCAmelCase ):
param.to(__lowerCAmelCase )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"""We move the model to cuda.""" )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
snake_case = replace_with_bnb_layers(
__lowerCAmelCase , __lowerCAmelCase , modules_to_not_convert=__lowerCAmelCase )
snake_case = get_quantized_model_device_map(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , max_memory=__lowerCAmelCase , no_split_module_classes=__lowerCAmelCase , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
snake_case = True
snake_case = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] )
load_checkpoint_in_model(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCAmelCase , offload_state_dict=__lowerCAmelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__lowerCAmelCase , device_map=__lowerCAmelCase , offload_dir=__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Any=None , __lowerCAmelCase : str=None ) -> Tuple:
if device_map is None:
if torch.cuda.is_available():
snake_case = {"""""": torch.cuda.current_device()}
else:
raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" )
logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"""If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """
"""'sequential'.""" )
snake_case = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
snake_case = {}
snake_case = special_dtypes
snake_case = no_split_module_classes
snake_case = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
snake_case = get_balanced_memory(
__lowerCAmelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=__lowerCAmelCase , **__lowerCAmelCase , )
snake_case = max_memory
snake_case = infer_auto_device_map(__lowerCAmelCase , **__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# check if don't have any quantized module on the cpu
snake_case = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
snake_case = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"""
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
""" )
else:
logger.info(
"""Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit""" )
del device_map_without_some_modules
return device_map
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=None ) -> Any:
if modules_to_not_convert is None:
snake_case = []
snake_case , snake_case = _replace_with_bnb_layers(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Dict=None , ) -> str:
snake_case = False
for name, module in model.named_children():
if current_key_name is None:
snake_case = []
current_key_name.append(__lowerCAmelCase )
if isinstance(__lowerCAmelCase , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
snake_case = """.""".join(__lowerCAmelCase )
snake_case = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
snake_case = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
snake_case = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCAmelCase , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
snake_case = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" )
snake_case = module.weight.data
if module.bias is not None:
snake_case = module.bias.data
bnb_module.requires_grad_(__lowerCAmelCase )
setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case = True
if len(list(module.children() ) ) > 0:
snake_case , snake_case = _replace_with_bnb_layers(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
snake_case = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> List[Any]:
# Create a copy of the model
with init_empty_weights():
snake_case = deepcopy(__lowerCAmelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
snake_case = find_tied_parameters(__lowerCAmelCase )
# For compatibility with Accelerate < 0.18
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
snake_case = sum(__lowerCAmelCase , [] )
snake_case = len(__lowerCAmelCase ) > 0
# Check if it is a base model
snake_case = False
if hasattr(__lowerCAmelCase , """base_model_prefix""" ):
snake_case = not hasattr(__lowerCAmelCase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
snake_case = list(model.named_children() )
snake_case = [list_modules[-1][0]]
# add last module together with tied weights
snake_case = set(__lowerCAmelCase ) - set(__lowerCAmelCase )
snake_case = list(set(__lowerCAmelCase ) ) + list(__lowerCAmelCase )
# remove ".weight" from the keys
snake_case = [""".weight""", """.bias"""]
snake_case = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
snake_case = name.replace(__lowerCAmelCase , """""" )
filtered_module_names.append(__lowerCAmelCase )
return filtered_module_names
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> str:
for m in model.modules():
if isinstance(__lowerCAmelCase , bnb.nn.Linearabit ):
return True
return False
def __lowerCamelCase ( __lowerCAmelCase : nn.Module ) -> int:
return next(parameter.parameters() ).device
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__lowerCAmelCase , __lowerCAmelCase , 0 , dtype=__lowerCAmelCase , value=__lowerCAmelCase )
snake_case = param_name
snake_case = model
if "." in tensor_name:
snake_case = tensor_name.split(""".""" )
for split in splits[:-1]:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
snake_case = new_module
snake_case = splits[-1]
# offload weights
snake_case = False
offload_weight(module._parameters[tensor_name] , __lowerCAmelCase , __lowerCAmelCase , index=__lowerCAmelCase )
if hasattr(module._parameters[tensor_name] , """SCB""" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , __lowerCAmelCase , index=__lowerCAmelCase , )
else:
offload_weight(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index=__lowerCAmelCase )
offload_weight(__lowerCAmelCase , param_name.replace("""weight""" , """SCB""" ) , __lowerCAmelCase , index=__lowerCAmelCase )
set_module_tensor_to_device(__lowerCAmelCase , __lowerCAmelCase , """meta""" , dtype=__lowerCAmelCase , value=torch.empty(*param.size() ) )
| 3 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> int:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ) -> List[str]:
snake_case = SEWConfig()
if is_finetuned:
snake_case = model.wav_encoder.wav_model.cfg
else:
snake_case = model.cfg
snake_case = fs_config.conv_bias
snake_case = eval(fs_config.conv_feature_layers )
snake_case = [x[0] for x in conv_layers]
snake_case = [x[1] for x in conv_layers]
snake_case = [x[2] for x in conv_layers]
snake_case = """gelu"""
snake_case = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
snake_case = 0.0
snake_case = fs_config.activation_fn.name
snake_case = fs_config.encoder_embed_dim
snake_case = 0.02
snake_case = fs_config.encoder_ffn_embed_dim
snake_case = 1e-5
snake_case = fs_config.encoder_layerdrop
snake_case = fs_config.encoder_attention_heads
snake_case = fs_config.conv_pos_groups
snake_case = fs_config.conv_pos
snake_case = len(__lowerCAmelCase )
snake_case = fs_config.encoder_layers
snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case = model.cfg
snake_case = fs_config.final_dropout
snake_case = fs_config.layerdrop
snake_case = fs_config.activation_dropout
snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case = fs_config.attention_dropout
snake_case = fs_config.dropout_input
snake_case = fs_config.dropout
snake_case = fs_config.mask_channel_length
snake_case = fs_config.mask_channel_prob
snake_case = fs_config.mask_length
snake_case = fs_config.mask_prob
snake_case = """Wav2Vec2FeatureExtractor"""
snake_case = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : str=True ) -> Any:
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = convert_config(model[0] , __lowerCAmelCase )
snake_case = model[0].eval()
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = SEWForCTC(__lowerCAmelCase )
else:
snake_case = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 3 | 1 |
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> Any:
snake_case = VideoMAEConfig()
set_architecture_configs(__lowerCAmelCase , __lowerCAmelCase )
if "finetuned" not in model_name:
snake_case = False
if "finetuned" in model_name:
snake_case = """huggingface/label-files"""
if "kinetics" in model_name:
snake_case = 4_00
snake_case = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
snake_case = 1_74
snake_case = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
snake_case = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if "small" in model_name:
snake_case = 3_84
snake_case = 15_36
snake_case = 12
snake_case = 16
snake_case = 12
snake_case = 3
snake_case = 1_92
snake_case = 7_68
elif "large" in model_name:
snake_case = 10_24
snake_case = 40_96
snake_case = 24
snake_case = 16
snake_case = 12
snake_case = 8
snake_case = 5_12
snake_case = 20_48
elif "huge" in model_name:
snake_case = 12_80
snake_case = 51_20
snake_case = 32
snake_case = 16
snake_case = 12
snake_case = 8
snake_case = 6_40
snake_case = 25_60
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def __lowerCamelCase ( __lowerCAmelCase : str ) -> Union[str, Any]:
if "encoder." in name:
snake_case = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
snake_case = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
snake_case = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
snake_case = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
snake_case = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
snake_case = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
snake_case = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
snake_case = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
snake_case = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
snake_case = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
snake_case = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
snake_case = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
snake_case = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
snake_case = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
snake_case = name.replace("""head""" , """classifier""" )
return name
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : str ) -> List[str]:
for key in orig_state_dict.copy().keys():
snake_case = orig_state_dict.pop(__lowerCAmelCase )
if key.startswith("""encoder.""" ):
snake_case = key.replace("""encoder.""" , """""" )
if "qkv" in key:
snake_case = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
snake_case = config.decoder_hidden_size
snake_case = int(key_split[2] )
snake_case = """decoder.decoder_layers."""
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = config.hidden_size
snake_case = int(key_split[1] )
snake_case = """videomae.encoder.layer."""
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = val
return orig_state_dict
def __lowerCamelCase ( ) -> int:
snake_case = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
snake_case = np.load(__lowerCAmelCase )
return list(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
snake_case = get_videomae_config(__lowerCAmelCase )
if "finetuned" in model_name:
snake_case = VideoMAEForVideoClassification(__lowerCAmelCase )
else:
snake_case = VideoMAEForPreTraining(__lowerCAmelCase )
# download original checkpoint, hosted on Google Drive
snake_case = """pytorch_model.bin"""
gdown.cached_download(__lowerCAmelCase , __lowerCAmelCase , quiet=__lowerCAmelCase )
snake_case = torch.load(__lowerCAmelCase , map_location="""cpu""" )
if "model" in files:
snake_case = files["""model"""]
else:
snake_case = files["""module"""]
snake_case = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify model on basic input
snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case = prepare_video()
snake_case = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
if "finetuned" not in model_name:
snake_case = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
snake_case = torch.load(__lowerCAmelCase )
snake_case = model(**__lowerCAmelCase )
snake_case = outputs.logits
snake_case = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case = outputs.loss
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(__lowerCAmelCase , organization="""nielsr""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 3 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaControlnetImgaImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 3 | 1 |
'''simple docstring'''
import itertools
import math
def __lowerCamelCase ( __lowerCAmelCase : int ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCamelCase ( ) -> Optional[Any]:
snake_case = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def __lowerCamelCase ( __lowerCAmelCase : int = 1_00_01 ) -> int:
return next(itertools.islice(prime_generator() , nth - 1 , __lowerCAmelCase ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ) -> list:
snake_case = len(__lowerCAmelCase )
snake_case = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
snake_case = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : List[Any] , __snake_case : List[str] , __snake_case : Tuple )-> Optional[int]:
super().__init__()
# make sure scheduler can always be converted to DDIM
snake_case = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__snake_case , scheduler=__snake_case )
@torch.no_grad()
def __call__( self : Tuple , __snake_case : int = 1 , __snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case : float = 0.0 , __snake_case : int = 50 , __snake_case : Optional[bool] = None , __snake_case : Optional[str] = "pil" , __snake_case : bool = True , )-> Union[ImagePipelineOutput, Tuple]:
# Sample gaussian noise to begin loop
if isinstance(self.unet.config.sample_size , __snake_case ):
snake_case = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
snake_case = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(__snake_case , __snake_case ) and len(__snake_case ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__snake_case )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
snake_case = randn_tensor(__snake_case , generator=__snake_case , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case = self.unet(__snake_case , __snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
snake_case = self.scheduler.step(
__snake_case , __snake_case , __snake_case , eta=__snake_case , use_clipped_model_output=__snake_case , generator=__snake_case ).prev_sample
snake_case = (image / 2 + 0.5).clamp(0 , 1 )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case = self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
| 3 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
| 3 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = field(default="summarization" , metadata={"include_in_asdict_even_if_is_default": True} )
snake_case_ = Features({"text": Value("string" )} )
snake_case_ = Features({"summary": Value("string" )} )
snake_case_ = "text"
snake_case_ = "summary"
@property
def lowerCAmelCase ( self : List[str] )-> Dict[str, str]:
return {self.text_column: "text", self.summary_column: "summary"}
| 3 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple:
snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
snake_case = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict:
snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case = self.get_auto_remove_tmp_dir()
snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ["""run_translation.py"""] + args
with patch.object(__snake_case , """argv""" , __snake_case ):
main()
return output_dir
| 3 | 1 |
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 1_00 * 2**20, 9_00 * 2**20] )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , __lowerCAmelCase )
snake_case = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
snake_case = dataset_size < in_memory_max_size
else:
snake_case = False
snake_case = is_small_dataset(__lowerCAmelCase )
assert result == expected
| 3 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> int:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> str:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True ) -> List[Any]:
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> Any:
snake_case = 0
snake_case = len(__lowerCAmelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __lowerCAmelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] ) -> str:
if len(__lowerCAmelCase ) <= 1:
return arr, 0
snake_case = len(__lowerCAmelCase ) // 2
snake_case = arr[0:mid]
snake_case = arr[mid:]
snake_case , snake_case = count_inversions_recursive(__lowerCAmelCase )
snake_case , snake_case = count_inversions_recursive(__lowerCAmelCase )
snake_case , snake_case = _count_cross_inversions(__lowerCAmelCase , __lowerCAmelCase )
snake_case = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Tuple:
snake_case = []
snake_case = snake_case = snake_case = 0
while i < len(__lowerCAmelCase ) and j < len(__lowerCAmelCase ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__lowerCAmelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__lowerCAmelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def __lowerCamelCase ( ) -> Optional[int]:
snake_case = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
snake_case = count_inversions_bf(__lowerCAmelCase )
snake_case , snake_case = count_inversions_recursive(__lowerCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("""number of inversions = """ , __lowerCAmelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
snake_case = count_inversions_bf(__lowerCAmelCase )
snake_case , snake_case = count_inversions_recursive(__lowerCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __lowerCAmelCase )
# an empty list should also have zero inversions
snake_case = []
snake_case = count_inversions_bf(__lowerCAmelCase )
snake_case , snake_case = count_inversions_recursive(__lowerCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("""number of inversions = """ , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 3 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
# Create a dummy config file with image_proceesor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
snake_case_ = 42
snake_case_ = None
snake_case_ = None
_SCREAMING_SNAKE_CASE = namedtuple("CoinsDistribResult", "moves excess")
def __lowerCamelCase ( __lowerCAmelCase : TreeNode | None ) -> int:
if root is None:
return 0
# Validation
def count_nodes(__lowerCAmelCase : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(__lowerCAmelCase : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(__lowerCAmelCase ) != count_coins(__lowerCAmelCase ):
raise ValueError("""The nodes number should be same as the number of coins""" )
# Main calculation
def get_distrib(__lowerCAmelCase : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 , 1 )
snake_case , snake_case = get_distrib(node.left )
snake_case , snake_case = get_distrib(node.right )
snake_case = 1 - left_distrib_excess
snake_case = 1 - right_distrib_excess
snake_case = (
left_distrib_moves
+ right_distrib_moves
+ abs(__lowerCAmelCase )
+ abs(__lowerCAmelCase )
)
snake_case = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(__lowerCAmelCase , __lowerCAmelCase )
return get_distrib(__lowerCAmelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "Salesforce/blip-image-captioning-base"
snake_case_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case_ = "image_captioner"
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ["image"]
snake_case_ = ["text"]
def __init__( self : Tuple , *__snake_case : Optional[int] , **__snake_case : Any )-> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : str , __snake_case : "Image" )-> int:
return self.pre_processor(images=__snake_case , return_tensors="""pt""" )
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> Union[str, Any]:
return self.model.generate(**__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any )-> Dict:
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0].strip()
| 3 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE = {
"google/rembert": 256,
}
_SCREAMING_SNAKE_CASE = "▁"
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = RemBertTokenizer
def __init__( self : List[Any] , __snake_case : Optional[int]=None , __snake_case : Union[str, Any]=None , __snake_case : List[str]=True , __snake_case : Any=True , __snake_case : Tuple=False , __snake_case : Optional[int]="[CLS]" , __snake_case : Tuple="[SEP]" , __snake_case : str="<unk>" , __snake_case : List[str]="[SEP]" , __snake_case : Union[str, Any]="<pad>" , __snake_case : List[str]="[CLS]" , __snake_case : Optional[Any]="[MASK]" , **__snake_case : str , )-> Optional[int]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
super().__init__(
__snake_case , tokenizer_file=__snake_case , do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , **__snake_case , )
snake_case = do_lower_case
snake_case = remove_space
snake_case = keep_accents
snake_case = vocab_file
snake_case = False if not self.vocab_file else True
def lowerCAmelCase ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None )-> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case )) + [1]
return [1] + ([0] * len(__snake_case )) + [1]
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : Optional[List[int]] = None )-> List[int]:
snake_case = [self.sep_token_id]
snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error("""Vocabulary path ({}) should be a directory""".format(__snake_case ) )
return
snake_case = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ):
copyfile(self.vocab_file , __snake_case )
return (out_vocab_file,)
| 3 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 3 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaImgaImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image"]
snake_case_ = [
"image_embeds",
"negative_image_embeds",
"image",
]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
return 32
@property
def lowerCAmelCase ( self : Union[str, Any] )-> Optional[int]:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Dict:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : List[str] )-> Tuple:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Dict )-> str:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Optional[int]=0 )-> List[str]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = """A red cartoon frog, 4k"""
snake_case = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : dict ) -> str:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase , params=__lowerCAmelCase ).content , """html.parser""" )
snake_case = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
snake_case = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> int:
return int((input_a, input_a).count(0 ) == 0 )
def __lowerCamelCase ( ) -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 3 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "WhisperFeatureExtractor"
snake_case_ = "WhisperTokenizer"
def __init__( self : Dict , __snake_case : Any , __snake_case : int )-> List[Any]:
super().__init__(__snake_case , __snake_case )
snake_case = self.feature_extractor
snake_case = False
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str=None , __snake_case : List[str]=None , __snake_case : int=True )-> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self : str , *__snake_case : Tuple , **__snake_case : Union[str, Any] )-> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
snake_case = kwargs.pop("""audio""" , __snake_case )
snake_case = kwargs.pop("""sampling_rate""" , __snake_case )
snake_case = kwargs.pop("""text""" , __snake_case )
if len(__snake_case ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
snake_case = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
snake_case = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings["""input_ids"""]
return inputs
def lowerCAmelCase ( self : Union[str, Any] , *__snake_case : Union[str, Any] , **__snake_case : str )-> Optional[Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : Any , **__snake_case : Union[str, Any] )-> List[str]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : Dict="np" )-> Any:
return self.tokenizer.get_prompt_ids(__snake_case , return_tensors=__snake_case )
| 3 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] )-> Any:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__snake_case )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : List[str] , **__snake_case : int )-> List[str]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__snake_case ).image_processor
def lowerCAmelCase ( self : Any )-> Dict:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
snake_case = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : List[Any] )-> Any:
snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def lowerCAmelCase ( self : int )-> Union[str, Any]:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__snake_case )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__snake_case , return_tensors="""np""" )
snake_case = processor(images=__snake_case , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def lowerCAmelCase ( self : List[str] )-> Any:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__snake_case )
snake_case = [torch.ones((1, 3, 5, 5) )]
snake_case = [[17_64, 26_46]]
snake_case = [[6_83, 10_24]]
snake_case = processor.post_process_masks(__snake_case , __snake_case , __snake_case )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case = processor.post_process_masks(
__snake_case , torch.tensor(__snake_case ) , torch.tensor(__snake_case ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
snake_case = [np.ones((1, 3, 5, 5) )]
snake_case = processor.post_process_masks(__snake_case , np.array(__snake_case ) , np.array(__snake_case ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case = [[1, 0], [0, 1]]
with self.assertRaises(__snake_case ):
snake_case = processor.post_process_masks(__snake_case , np.array(__snake_case ) , np.array(__snake_case ) )
@require_vision
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Union[str, Any] )-> Union[str, Any]:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__snake_case )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] , **__snake_case : Dict )-> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **__snake_case ).image_processor
def lowerCAmelCase ( self : Tuple )-> int:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Any )-> Union[str, Any]:
snake_case = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : List[Any] )-> Union[str, Any]:
snake_case = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
snake_case = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def lowerCAmelCase ( self : Any )-> int:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__snake_case )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__snake_case , return_tensors="""np""" )
snake_case = processor(images=__snake_case , return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def lowerCAmelCase ( self : Optional[int] )-> str:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__snake_case )
snake_case = [tf.ones((1, 3, 5, 5) )]
snake_case = [[17_64, 26_46]]
snake_case = [[6_83, 10_24]]
snake_case = processor.post_process_masks(__snake_case , __snake_case , __snake_case , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case = processor.post_process_masks(
__snake_case , tf.convert_to_tensor(__snake_case ) , tf.convert_to_tensor(__snake_case ) , return_tensors="""tf""" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
snake_case = [np.ones((1, 3, 5, 5) )]
snake_case = processor.post_process_masks(
__snake_case , np.array(__snake_case ) , np.array(__snake_case ) , return_tensors="""tf""" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
snake_case = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
snake_case = processor.post_process_masks(
__snake_case , np.array(__snake_case ) , np.array(__snake_case ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : str )-> List[Any]:
snake_case = tempfile.mkdtemp()
snake_case = SamImageProcessor()
snake_case = SamProcessor(__snake_case )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Dict , **__snake_case : Tuple )-> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__snake_case ).image_processor
def lowerCAmelCase ( self : str )-> Any:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Dict )-> Dict:
snake_case = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def lowerCAmelCase ( self : int )-> Union[str, Any]:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__snake_case )
snake_case = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
snake_case = [tf.convert_to_tensor(__snake_case )]
snake_case = [torch.tensor(__snake_case )]
snake_case = [[17_64, 26_46]]
snake_case = [[6_83, 10_24]]
snake_case = processor.post_process_masks(
__snake_case , __snake_case , __snake_case , return_tensors="""tf""" )
snake_case = processor.post_process_masks(
__snake_case , __snake_case , __snake_case , return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def lowerCAmelCase ( self : Union[str, Any] )-> str:
snake_case = self.get_image_processor()
snake_case = SamProcessor(image_processor=__snake_case )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(__snake_case , return_tensors="""pt""" )["""pixel_values"""].numpy()
snake_case = processor(images=__snake_case , return_tensors="""pt""" )["""pixel_values"""].numpy()
snake_case = image_processor(__snake_case , return_tensors="""tf""" )["""pixel_values"""].numpy()
snake_case = processor(images=__snake_case , return_tensors="""tf""" )["""pixel_values"""].numpy()
self.assertTrue(np.allclose(__snake_case , __snake_case ) )
self.assertTrue(np.allclose(__snake_case , __snake_case ) )
self.assertTrue(np.allclose(__snake_case , __snake_case ) )
| 3 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 1
for i in range(0 , len(__lowerCAmelCase ) ):
total *= numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
snake_case = 0
snake_case = str(__lowerCAmelCase )
while len(__lowerCAmelCase ) != 1:
snake_case = [int(__lowerCAmelCase ) for i in num_string]
snake_case = 0
for i in range(0 , len(__lowerCAmelCase ) ):
total += numbers[i]
snake_case = str(__lowerCAmelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : Tuple , __snake_case : Dict=3 , __snake_case : str=32 , __snake_case : List[str]=3 , __snake_case : Dict=10 , __snake_case : Any=[10, 20, 30, 40] , __snake_case : List[Any]=[1, 1, 2, 1] , __snake_case : List[Any]=True , __snake_case : str=True , __snake_case : Tuple="relu" , __snake_case : List[str]=3 , __snake_case : Optional[Any]=None , )-> Tuple:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = num_channels
snake_case = embeddings_size
snake_case = hidden_sizes
snake_case = depths
snake_case = is_training
snake_case = use_labels
snake_case = hidden_act
snake_case = num_labels
snake_case = scope
snake_case = len(__snake_case )
def lowerCAmelCase ( self : int )-> Union[str, Any]:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.num_labels )
snake_case = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def lowerCAmelCase ( self : List[str] , __snake_case : str , __snake_case : List[Any] , __snake_case : Dict )-> Dict:
snake_case = RegNetModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase ( self : Dict , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] )-> Union[str, Any]:
snake_case = self.num_labels
snake_case = RegNetForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case = config_and_inputs
snake_case = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
snake_case_ = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = RegNetModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def lowerCAmelCase ( self : List[str] )-> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : List[str] )-> Dict:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCAmelCase ( self : Dict )-> Dict:
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
pass
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__snake_case )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCAmelCase ( self : Any )-> Dict:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(config=__snake_case )
for name, module in model.named_modules():
if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
def check_hidden_states_output(__snake_case : int , __snake_case : Optional[int] , __snake_case : str ):
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.eval()
with torch.no_grad():
snake_case = model(**self._prepare_for_class(__snake_case , __snake_case ) )
snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case = self.model_tester.num_stages
self.assertEqual(len(__snake_case ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case = layer_type
snake_case = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case = True
check_hidden_states_output(__snake_case , __snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> List[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def lowerCAmelCase ( self : Tuple )-> Any:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = RegNetModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def __lowerCamelCase ( ) -> Any:
snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : List[str] )-> Dict:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : Optional[Any] )-> Dict:
snake_case = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__snake_case )
snake_case = self.default_image_processor
snake_case = prepare_img()
snake_case = image_processor(images=__snake_case , return_tensors="""pt""" ).to(__snake_case )
# forward pass
with torch.no_grad():
snake_case = model(**__snake_case )
# verify the logits
snake_case = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __snake_case )
snake_case = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4 ) )
| 3 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] ) -> Dict:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
snake_case = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def __lowerCamelCase ( __lowerCAmelCase : Any ) -> Optional[Any]:
snake_case = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def __lowerCamelCase ( ) -> Any:
snake_case = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : str ) -> Optional[int]:
snake_case = """imagenet-1k-id2label.json"""
snake_case = 10_00
snake_case = """huggingface/label-files"""
snake_case = num_labels
snake_case = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
snake_case = snake_case = CvtConfig(num_labels=__lowerCAmelCase , idalabel=__lowerCAmelCase , labelaid=__lowerCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
snake_case = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
snake_case = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
snake_case = [2, 2, 20]
snake_case = [3, 12, 16]
snake_case = [1_92, 7_68, 10_24]
snake_case = CvtForImageClassification(__lowerCAmelCase )
snake_case = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
snake_case = image_size
snake_case = torch.load(__lowerCAmelCase , map_location=torch.device("""cpu""" ) )
snake_case = OrderedDict()
snake_case = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
snake_case = list_of_state_dict + cls_token(__lowerCAmelCase )
snake_case = list_of_state_dict + embeddings(__lowerCAmelCase )
for cnt in range(config.depth[idx] ):
snake_case = list_of_state_dict + attention(__lowerCAmelCase , __lowerCAmelCase )
snake_case = list_of_state_dict + final()
for gg in list_of_state_dict:
print(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
snake_case = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , __snake_case : Dict , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Any=True , __snake_case : Dict=True , __snake_case : Tuple=True , __snake_case : Union[str, Any]=True , __snake_case : Optional[Any]=99 , __snake_case : Union[str, Any]=16 , __snake_case : Optional[Any]=36 , __snake_case : int=6 , __snake_case : Tuple=6 , __snake_case : List[str]=6 , __snake_case : Optional[int]=37 , __snake_case : Tuple="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=0.1 , __snake_case : str=5_12 , __snake_case : List[Any]=16 , __snake_case : Optional[int]=2 , __snake_case : Tuple=0.02 , __snake_case : int=3 , __snake_case : List[str]=4 , __snake_case : str=None , )-> int:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = embedding_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_hidden_groups
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def lowerCAmelCase ( self : Tuple )-> Any:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase ( self : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Any , __snake_case : Optional[int] , __snake_case : List[str] )-> int:
snake_case = AlbertModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
snake_case = model(__snake_case , token_type_ids=__snake_case )
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[Any] )-> Any:
snake_case = AlbertForPreTraining(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , sentence_order_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase ( self : Any , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : List[Any] )-> Any:
snake_case = AlbertForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : str , __snake_case : str )-> Union[str, Any]:
snake_case = AlbertForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Dict , __snake_case : Any , __snake_case : Any , __snake_case : Dict , __snake_case : Tuple , __snake_case : str , __snake_case : List[str] , __snake_case : int )-> Any:
snake_case = self.num_labels
snake_case = AlbertForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Any , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple )-> List[Any]:
snake_case = self.num_labels
snake_case = AlbertForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Dict , __snake_case : str )-> Tuple:
snake_case = self.num_choices
snake_case = AlbertForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = self.prepare_config_and_inputs()
(
(
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) , (
snake_case
) ,
) = config_and_inputs
snake_case = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def lowerCAmelCase ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Tuple=False )-> List[str]:
snake_case = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class in get_values(__snake_case ):
snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case )
snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def lowerCAmelCase ( self : Any )-> Any:
snake_case = AlbertModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowerCAmelCase ( self : int )-> List[str]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int )-> List[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
def lowerCAmelCase ( self : Optional[int] )-> Tuple:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def lowerCAmelCase ( self : str )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(*__snake_case )
@slow
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = AlbertModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any )-> Tuple:
snake_case = AlbertModel.from_pretrained("""albert-base-v2""" )
snake_case = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case = model(__snake_case , attention_mask=__snake_case )[0]
snake_case = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __snake_case )
snake_case = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1e-4 ) )
| 3 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_SCREAMING_SNAKE_CASE = {
"openbmb/cpm-ant-10b": 1024,
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> str:
snake_case = collections.OrderedDict()
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as reader:
snake_case = reader.readlines()
for index, token in enumerate(__lowerCAmelCase ):
snake_case = token.rstrip("""\n""" )
snake_case = index
return vocab
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]=2_00 )-> List[str]:
snake_case = vocab
snake_case = unk_token
snake_case = max_input_chars_per_word
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> List[Any]:
snake_case = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case = 0
snake_case = []
while start < len(__snake_case ):
snake_case = len(__snake_case )
snake_case = None
while start < end:
snake_case = """""".join(chars[start:end] )
if substr in self.vocab:
snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
snake_case = end
return sub_tokens
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = False
def __init__( self : int , __snake_case : Tuple , __snake_case : Optional[int]="<d>" , __snake_case : int="</d>" , __snake_case : List[Any]="<s>" , __snake_case : List[str]="</s>" , __snake_case : str="<pad>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : str="</n>" , __snake_case : List[str]="</_>" , __snake_case : Union[str, Any]="left" , **__snake_case : Tuple , )-> Union[str, Any]:
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
snake_case = bod_token
snake_case = eod_token
snake_case = load_vocab(__snake_case )
snake_case = self.encoder[space_token]
snake_case = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase ( self : Optional[int] )-> List[Any]:
return self.encoder[self.bod_token]
@property
def lowerCAmelCase ( self : str )-> Tuple:
return self.encoder[self.eod_token]
@property
def lowerCAmelCase ( self : str )-> List[str]:
return self.encoder["\n"]
@property
def lowerCAmelCase ( self : List[Any] )-> int:
return len(self.encoder )
def lowerCAmelCase ( self : Any )-> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __snake_case : Any )-> Union[str, Any]:
snake_case = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowerCAmelCase ( self : str , __snake_case : Tuple , **__snake_case : Dict )-> Optional[int]:
snake_case = [i for i in token_ids if i >= 0]
snake_case = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Dict )-> Optional[int]:
return token in self.encoder
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] )-> str:
return "".join(__snake_case )
def lowerCAmelCase ( self : Tuple , __snake_case : int )-> Optional[int]:
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : str , __snake_case : List[Any] )-> str:
return self.decoder.get(__snake_case , self.unk_token )
def lowerCAmelCase ( self : int , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
if os.path.isdir(__snake_case ):
snake_case = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
snake_case = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
snake_case = 0
if " " in self.encoder:
snake_case = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
snake_case = self.encoder["""\n"""]
del self.encoder["\n"]
snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __snake_case : x[1] ) )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
snake_case = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : List[int] = None )-> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
| 3 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["input_features", "attention_mask"]
def __init__( self : Union[str, Any] , __snake_case : Optional[int]=80 , __snake_case : int=1_60_00 , __snake_case : Union[str, Any]=80 , __snake_case : Optional[int]=0.0 , __snake_case : Dict=True , __snake_case : Any=True , __snake_case : List[Any]=True , **__snake_case : Any , )-> Tuple:
super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case )
snake_case = num_mel_bins
snake_case = do_ceptral_normalize
snake_case = normalize_means
snake_case = normalize_vars
snake_case = True
def lowerCAmelCase ( self : List[str] , __snake_case : np.ndarray , )-> np.ndarray:
snake_case = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
snake_case = torch.from_numpy(__snake_case ).unsqueeze(0 )
snake_case = ta_kaldi.fbank(__snake_case , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCAmelCase ( __snake_case : np.ndarray , __snake_case : int , __snake_case : Optional[bool] = True , __snake_case : Optional[bool] = True , __snake_case : float = 0.0 , )-> np.ndarray:
# make sure we normalize float32 arrays
if normalize_means:
snake_case = x[:input_length].mean(axis=0 )
snake_case = np.subtract(__snake_case , __snake_case )
if normalize_vars:
snake_case = x[:input_length].std(axis=0 )
snake_case = np.divide(__snake_case , __snake_case )
if input_length < x.shape[0]:
snake_case = padding_value
# make sure array is in float32
snake_case = x.astype(np.floataa )
return x
def lowerCAmelCase ( self : List[Any] , __snake_case : List[np.ndarray] , __snake_case : Optional[np.ndarray] = None )-> List[np.ndarray]:
snake_case = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(__snake_case , __snake_case , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(__snake_case , __snake_case )
]
def __call__( self : List[str] , __snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : Optional[int] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , **__snake_case : Union[str, Any] , )-> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
snake_case = isinstance(__snake_case , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
snake_case = is_batched_numpy or (
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
snake_case = [np.asarray(__snake_case , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
snake_case = np.asarray(__snake_case , dtype=np.floataa )
elif isinstance(__snake_case , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
snake_case = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
snake_case = [raw_speech]
# extract fbank features
snake_case = [self._extract_fbank_features(__snake_case ) for waveform in raw_speech]
# convert into correct format for padding
snake_case = BatchFeature({"""input_features""": features} )
snake_case = self.pad(
__snake_case , padding=__snake_case , max_length=__snake_case , truncation=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , **__snake_case , )
# make sure list is in array format
snake_case = padded_inputs.get("""input_features""" )
if isinstance(input_features[0] , __snake_case ):
snake_case = [np.asarray(__snake_case , dtype=np.floataa ) for feature in input_features]
snake_case = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
snake_case = [np.asarray(__snake_case , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
snake_case = (
np.array(__snake_case , dtype=np.intaa )
if self._get_padding_strategies(__snake_case , max_length=__snake_case ) is not PaddingStrategy.DO_NOT_PAD
else None
)
snake_case = self.normalize(
padded_inputs["""input_features"""] , attention_mask=__snake_case )
if return_tensors is not None:
snake_case = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
| 3 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __lowerCamelCase ( __lowerCAmelCase : dict ) -> tuple:
return (data["data"], data["target"])
def __lowerCamelCase ( __lowerCAmelCase : np.ndarray , __lowerCAmelCase : np.ndarray ) -> XGBClassifier:
snake_case = XGBClassifier()
classifier.fit(__lowerCAmelCase , __lowerCAmelCase )
return classifier
def __lowerCamelCase ( ) -> None:
snake_case = load_iris()
snake_case , snake_case = data_handling(__lowerCAmelCase )
snake_case , snake_case , snake_case , snake_case = train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 )
snake_case = iris["""target_names"""]
# Create an XGBoost Classifier from the training data
snake_case = xgboost(__lowerCAmelCase , __lowerCAmelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , display_labels=__lowerCAmelCase , cmap="""Blues""" , normalize="""true""" , )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "BlipImageProcessor"
snake_case_ = "AutoTokenizer"
def __init__( self : Any , __snake_case : Dict , __snake_case : Tuple , __snake_case : Any )-> int:
super().__init__(__snake_case , __snake_case )
# add QFormer tokenizer
snake_case = qformer_tokenizer
def __call__( self : str , __snake_case : ImageInput = None , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Any , )-> BatchFeature:
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
snake_case = BatchFeature()
if text is not None:
snake_case = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
encoding.update(__snake_case )
snake_case = self.qformer_tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
snake_case = qformer_text_encoding.pop("""input_ids""" )
snake_case = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
snake_case = self.image_processor(__snake_case , return_tensors=__snake_case )
encoding.update(__snake_case )
return encoding
def lowerCAmelCase ( self : Any , *__snake_case : int , **__snake_case : Optional[int] )-> str:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : int , *__snake_case : int , **__snake_case : str )-> Union[str, Any]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase ( self : Union[str, Any] )-> List[str]:
snake_case = self.tokenizer.model_input_names
snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase ( self : Tuple , __snake_case : Dict , **__snake_case : Optional[Any] )-> Optional[int]:
if os.path.isfile(__snake_case ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
snake_case = os.path.join(__snake_case , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(__snake_case )
return super().save_pretrained(__snake_case , **__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[str] , __snake_case : str , **__snake_case : int )-> Tuple:
snake_case = AutoTokenizer.from_pretrained(__snake_case , subfolder="""qformer_tokenizer""" )
snake_case = cls._get_arguments_from_pretrained(__snake_case , **__snake_case )
args.append(__snake_case )
return cls(*__snake_case )
| 3 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase ).text , """html.parser""" )
snake_case = soup.findAll("""h1""" )
snake_case = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCAmelCase , __lowerCAmelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 3 | 1 |
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int ) -> tuple[complex, complex]:
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
snake_case = b * b - 4 * a * c
snake_case = (-b + sqrt(__lowerCAmelCase )) / (2 * a)
snake_case = (-b - sqrt(__lowerCAmelCase )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def __lowerCamelCase ( ) -> str:
snake_case , snake_case = quadratic_roots(a=5 , b=6 , c=1 )
print(F'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 3 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_SCREAMING_SNAKE_CASE = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
_SCREAMING_SNAKE_CASE = {
"169M": 768,
"430M": 1024,
"1B5": 2048,
"3B": 2560,
"7B": 4096,
"14B": 5120,
}
def __lowerCamelCase ( __lowerCAmelCase : Tuple ) -> str:
snake_case = list(state_dict.keys() )
for name in state_dict_keys:
snake_case = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("""emb.""" ):
snake_case = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
snake_case = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
snake_case = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , __lowerCAmelCase )
# ffn -> feed_forward
snake_case = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
snake_case = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
snake_case = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(""".time_mix_r""" ):
snake_case = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
snake_case = """rwkv.""" + name
snake_case = weight
return state_dict
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Dict=None ) -> Optional[Any]:
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
snake_case = 5_02_77
snake_case = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
snake_case = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
snake_case = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
snake_case = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
snake_case = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
snake_case = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
snake_case = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
snake_case = torch.load(__lowerCAmelCase , map_location="""cpu""" )
snake_case = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
snake_case , snake_case = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
snake_case = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
snake_case = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + """\n"""
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"""Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" )
snake_case = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
snake_case = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
snake_case = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="""2GB""" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 3 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : int=None )-> str:
snake_case = data
snake_case = previous
snake_case = next_node
def __str__( self : Union[str, Any] )-> str:
return f'''{self.data}'''
def lowerCAmelCase ( self : Tuple )-> int:
return self.data
def lowerCAmelCase ( self : str )-> str:
return self.next
def lowerCAmelCase ( self : Dict )-> Optional[int]:
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , __snake_case : List[Any] )-> List[str]:
snake_case = head
def __iter__( self : Optional[int] )-> Dict:
return self
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
if not self.current:
raise StopIteration
else:
snake_case = self.current.get_data()
snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] )-> str:
snake_case = None # First node in list
snake_case = None # Last node in list
def __str__( self : List[str] )-> Any:
snake_case = self.head
snake_case = []
while current is not None:
nodes.append(current.get_data() )
snake_case = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : Optional[Any] , __snake_case : int )-> Optional[Any]:
snake_case = self.head
while current:
if current.get_data() == value:
return True
snake_case = current.get_next()
return False
def __iter__( self : Dict )-> List[Any]:
return LinkedListIterator(self.head )
def lowerCAmelCase ( self : Tuple )-> int:
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase ( self : List[Any] , __snake_case : Node )-> None:
if self.head is None:
snake_case = node
snake_case = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase ( self : int , __snake_case : Node )-> None:
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> None:
snake_case = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.previous
if node.get_previous() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : Optional[int] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.next
if node.get_next() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : int )-> None:
snake_case = 1
snake_case = Node(__snake_case )
snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
snake_case = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> Node:
snake_case = self.head
while node:
if node.get_data() == item:
return node
snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase ( self : Any , __snake_case : Dict )-> Tuple:
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
snake_case = self.head.get_next()
if node == self.tail:
snake_case = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Node )-> None:
if node.get_next():
snake_case = node.previous
if node.get_previous():
snake_case = node.next
snake_case = None
snake_case = None
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
return self.head is None
def __lowerCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
_SCREAMING_SNAKE_CASE = 500000
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = os.path.split(__file__)
_SCREAMING_SNAKE_CASE = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __lowerCamelCase ( __lowerCAmelCase : datasets.Dataset , **__lowerCAmelCase : int ) -> Tuple:
snake_case = dataset.map(**__lowerCAmelCase )
@get_duration
def __lowerCamelCase ( __lowerCAmelCase : datasets.Dataset , **__lowerCAmelCase : Dict ) -> Dict:
snake_case = dataset.filter(**__lowerCAmelCase )
def __lowerCamelCase ( ) -> int:
snake_case = {"""num examples""": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
snake_case = generate_example_dataset(
os.path.join(__lowerCAmelCase , """dataset.arrow""" ) , __lowerCAmelCase , num_examples=__lowerCAmelCase )
snake_case = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__lowerCAmelCase )
def tokenize(__lowerCAmelCase : str ):
return tokenizer(examples["""text"""] )
snake_case = map(__lowerCAmelCase )
snake_case = map(__lowerCAmelCase , batched=__lowerCAmelCase )
snake_case = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="""numpy""" ):
snake_case = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="""pandas""" ):
snake_case = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
snake_case = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
snake_case = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
snake_case = map(__lowerCAmelCase , function=__lowerCAmelCase , batched=__lowerCAmelCase )
snake_case = filter(__lowerCAmelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(__lowerCAmelCase , """wb""" ) as f:
f.write(json.dumps(__lowerCAmelCase ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 3 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __snake_case : Optional[int]=5_02_67 , __snake_case : List[Any]=10_24 , __snake_case : str=12 , __snake_case : Union[str, Any]=40_96 , __snake_case : List[Any]=16 , __snake_case : Tuple=12 , __snake_case : Tuple=40_96 , __snake_case : Union[str, Any]=16 , __snake_case : Any=0.0 , __snake_case : Dict=0.0 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=10_24 , __snake_case : int=0.1 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=0.02 , __snake_case : Any=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Tuple=1 , __snake_case : Tuple=0 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=True , __snake_case : Dict=2 , __snake_case : Any=2 , __snake_case : Any=False , __snake_case : Any=1_00 , __snake_case : Optional[Any]=8_00 , **__snake_case : List[Any] , )-> Optional[int]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case = use_prompt
snake_case = prompt_length
snake_case = prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 3 | 1 |
'''simple docstring'''
from typing import Any
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : dict , __lowerCAmelCase : dict , __lowerCAmelCase : dict , ) -> list:
_validation(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
# Creates data structures and fill initial step
snake_case = {}
snake_case = {}
for state in states_space:
snake_case = observations_space[0]
snake_case = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__lowerCAmelCase ) ):
snake_case = observations_space[o]
snake_case = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
snake_case = """"""
snake_case = -1
for k_state in states_space:
snake_case = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case = probability
snake_case = k_state
# Update probabilities and pointers dicts
snake_case = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case = arg_max
# The final observation
snake_case = observations_space[len(__lowerCAmelCase ) - 1]
# argmax for given final observation
snake_case = """"""
snake_case = -1
for k_state in states_space:
snake_case = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case = probability
snake_case = k_state
snake_case = arg_max
# Process pointers backwards
snake_case = last_state
snake_case = []
for o in range(len(__lowerCAmelCase ) - 1 , -1 , -1 ):
result.append(__lowerCAmelCase )
snake_case = pointers[previous, observations_space[o]]
result.reverse()
return result
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , ) -> None:
_validate_not_empty(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , )
_validate_lists(__lowerCAmelCase , __lowerCAmelCase )
_validate_dicts(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , ) -> None:
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> None:
_validate_list(__lowerCAmelCase , """observations_space""" )
_validate_list(__lowerCAmelCase , """states_space""" )
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> None:
if not isinstance(_object , __lowerCAmelCase ):
snake_case = F'''{var_name} must be a list'''
raise ValueError(__lowerCAmelCase )
else:
for x in _object:
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = F'''{var_name} must be a list of strings'''
raise ValueError(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Any , ) -> None:
_validate_dict(__lowerCAmelCase , """initial_probabilities""" , __lowerCAmelCase )
_validate_nested_dict(__lowerCAmelCase , """transition_probabilities""" )
_validate_nested_dict(__lowerCAmelCase , """emission_probabilities""" )
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> None:
_validate_dict(_object , __lowerCAmelCase , __lowerCAmelCase )
for x in _object.values():
_validate_dict(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : type , __lowerCAmelCase : bool = False ) -> None:
if not isinstance(_object , __lowerCAmelCase ):
snake_case = F'''{var_name} must be a dict'''
raise ValueError(__lowerCAmelCase )
if not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for x in _object ):
snake_case = F'''{var_name} all keys must be strings'''
raise ValueError(__lowerCAmelCase )
if not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for x in _object.values() ):
snake_case = """nested dictionary """ if nested else """"""
snake_case = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(__lowerCAmelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 3 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ) -> list:
snake_case = len(__lowerCAmelCase )
snake_case = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
snake_case = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
# create emtpy sample processor
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write("""{}""" )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.