Dataset schema (one row per example):

| column | type | values |
| --- | --- | --- |
| code | string | 86 to 54.5k characters |
| code_codestyle | int64 | 0 to 371 |
| style_context | string | 87 to 49.2k characters |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
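A minimal sketch of how rows with this schema could be inspected with the `datasets` library. The dataset id `user/code-style-pairs` is a placeholder assumption; the source does not name the dataset:

```python
# Hypothetical loading sketch -- the dataset id below is a placeholder,
# not a name given by this document.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # assumed id
row = ds[0]
print(len(row["code"]), row["code_codestyle"])    # code snippet and its style id
print(len(row["style_context"]), row["label"])    # context snippet and binary label
```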
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
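A small sketch of the backward-compatibility guarantee this shim provides: since `file_utils` re-exports these names from `.utils`, legacy imports keep resolving to the same objects. Module paths assume the `transformers` package layout:

```python
# Both import paths resolve to the same object after the re-export above.
from transformers.file_utils import ModelOutput as legacy_ModelOutput
from transformers.utils import ModelOutput

assert legacy_ModelOutput is ModelOutput  # the shim keeps old code working
```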
[code_codestyle: 366]
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
[style_context_codestyle: 3 | label: 0]
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
[code_codestyle: 367]
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """
    Interpolate and evaluate the polynomial through the given points at x0
    using Neville's recursive scheme. Returns the interpolated value and the
    full table of intermediate approximations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
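A quick worked check of the routine as reconstructed above: interpolating through points of y = x² reproduces the quadratic exactly, so evaluating at 2.5 must give 6.25:

```python
# Interpolating y = x^2; the interpolant collapses to x^2 itself,
# so the value at 2.5 is exactly 6.25.
value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)
print(value)  # 6.25
```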
[style_context_codestyle: 3 | label: 0]
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of an overflow, map each `input_ids` sample back to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
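A hedged usage sketch for the processor above. The checkpoint id `microsoft/layoutlmv3-base` and the local file `document.png` are assumptions, and OCR via the image processor (the `apply_ocr=True` default) requires pytesseract plus a Tesseract install:

```python
from PIL import Image
from transformers import LayoutLMv3Processor

# assumed checkpoint and input file
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")

encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # expect input_ids, attention_mask, bbox, pixel_values
```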
[code_codestyle: 368]
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    """Depth-first topological sort over the module-level graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
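For this particular graph the printed list is the DFS post-order, ['c', 'd', 'e', 'b', 'a']; reversing it yields an ordering in which every edge u -> v places u before v:

```python
# Reverse the DFS post-order to obtain a topological ordering of the DAG above.
order = list(reversed(topological_sort("a", [], [])))
print(order)  # ['a', 'b', 'e', 'd', 'c']: 'a' precedes 'b'/'c', 'b' precedes 'd'/'e'
```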
[style_context_codestyle: 3 | label: 0]
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
snake_case = [64, 1_28, 3_20, 5_12]
snake_case = 2_56
elif size == "b2":
snake_case = [64, 1_28, 3_20, 5_12]
snake_case = 7_68
snake_case = [3, 4, 6, 3]
elif size == "b3":
snake_case = [64, 1_28, 3_20, 5_12]
snake_case = 7_68
snake_case = [3, 4, 18, 3]
elif size == "b4":
snake_case = [64, 1_28, 3_20, 5_12]
snake_case = 7_68
snake_case = [3, 8, 27, 3]
elif size == "b5":
snake_case = [64, 1_28, 3_20, 5_12]
snake_case = 7_68
snake_case = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])

    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
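For reference, a hypothetical invocation of the conversion script above; the script filename and both paths are placeholders, not values given by the source:

```python
# Hypothetical invocation of the conversion script; all paths are placeholders.
import subprocess

subprocess.run(
    [
        "python", "convert_segformer_original_to_pytorch.py",  # assumed filename
        "--model_name", "segformer.b0.512x512.ade.160k",
        "--checkpoint_path", "./segformer.b0.512x512.ade.160k.pth",  # placeholder
        "--pytorch_dump_folder_path", "./segformer-b0-ade",          # placeholder
    ],
    check=True,
)
```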
[code_codestyle: 369]
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
snake_case = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train
        if do_eval:
            args += args_eval
        if do_predict:
            args += args_predict
        if predict_with_generate:
            args += "--predict_with_generate".split()
        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
[style_context_codestyle: 3 | label: 0]
import math

import tensorflow as tf
from packaging import version


def _gelu(x):
    """Exact GELU, computed with the Gaussian error function."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (https://arxiv.org/abs/1606.08415)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
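A small numeric sanity check contrasting the exact and tanh-approximated GELU defined above; the printed values are approximate:

```python
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
print(_gelu(x).numpy())      # ~[-0.1587, 0.0, 0.8413] (exact, erf-based)
print(_gelu_new(x).numpy())  # close to the exact values, but not bit-identical
```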
[code_codestyle: 370]
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
Wav2Vec2CTCTokenizer,
Wav2Vec2FeatureExtractor,
Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = Wav2Vec2CTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = Wav2Vec2Processor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
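
    # Editor's note: an illustrative, hypothetical sanity check to run after the
    # conversion (not part of the original script). The folder path is a
    # placeholder, the audio is synthetic silence, and it assumes the fine-tuned
    # (CTC) variant was exported together with its processor.
    def _sanity_check_converted_model(folder="./hubert-converted"):
        import torch
        from transformers import HubertForCTC, Wav2Vec2Processor

        processor = Wav2Vec2Processor.from_pretrained(folder)
        model = HubertForCTC.from_pretrained(folder).eval()

        # one second of 16 kHz silence stands in for real speech
        inputs = processor(torch.zeros(16000).numpy(), sampling_rate=16000, return_tensors="pt")
        with torch.no_grad():
            logits = model(**inputs).logits
        return processor.batch_decode(logits.argmax(dim=-1))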
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
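
# Editor's note: an illustrative sketch (hypothetical names, not the actual
# `transformers` implementation) of what the `_LazyModule` indirection above
# does -- submodule imports are deferred until an attribute is first accessed.
import types


class _LazyModuleSketch(types.ModuleType):
    """Resolve attributes from submodules on first access instead of at import."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        import importlib

        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value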
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
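
# Editor's note: distilled from the registration tests above, this is the
# custom-class flow in sketch form; `MyConfig`/`MyImageProcessor` are
# hypothetical stand-ins, and the positional `register` signature is assumed
# to match the transformers version these tests target.
def _register_custom_image_processor_sketch():
    from transformers import PretrainedConfig
    from transformers.image_processing_utils import BaseImageProcessor

    class MyConfig(PretrainedConfig):
        model_type = "my-model"  # hypothetical model type

    class MyImageProcessor(BaseImageProcessor):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoImageProcessor.register(MyConfig, MyImageProcessor)
    # AutoImageProcessor.from_pretrained(...) can now resolve "my-model" configs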
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
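
# Editor's note: outside the test harness, the same denoising loop reads as
# below. The zero tensor is a stand-in for a real UNet's noise prediction, so
# the result is meaningless, but the API sequence (scale, predict, step) is
# exactly the one exercised above.
def _kdpm2_sampling_sketch():
    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(scaled)  # stand-in for a UNet forward pass
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample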
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
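
# Editor's note: a hypothetical invocation. `PipelineTool.__call__` chains
# `encode`, `forward`, and `decode`, so using the tool amounts to the sketch
# below ("photo.jpg" is a placeholder path; the BLIP checkpoint is downloaded
# on first use).
def _image_captioning_example(path="photo.jpg"):
    from PIL import Image

    tool = ImageCaptioningTool()
    return tool(Image.open(path))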
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n    year = {2004},\n    month = {01},\n    pages = {},\n    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
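
# Editor's note: a minimal, jiwer-independent cross-check of the formula
# WER = (S + D + I) / N via word-level Levenshtein distance. On the
# docstring's first pair (one substitution over four reference words) it
# gives 0.25.
def _wer_by_hand(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = edit distance between ref[:i] and hyp[:j]
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,  # deletion
                dp[i][j - 1] + 1,  # insertion
                dp[i - 1][j - 1] + cost,  # substitution / match
            )
    return dp[len(ref)][len(hyp)] / len(ref)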
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]]  # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
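
# Editor's note: a minimal usage sketch of the processor under test. The blank
# image is a stand-in, and OCR requires pytesseract; on a blank page the word
# and box lists should come back empty.
def _layoutlmv3_processor_example():
    processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
    image = Image.new("RGB", (640, 480), "white")  # blank stand-in image
    encoding = processor(image, return_tensors="pt")
    # encoding.pixel_values.shape == torch.Size([1, 3, 224, 224])
    return encoding.words, encoding.boxes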
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for Semantic Stable Diffusion pipelines.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """
    Wrapper class for all the attributes and features of 8-bit and 4-bit loading via `bitsandbytes`.
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]

        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
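
# Editor's note: for context, a typical 4-bit setup with this config. The
# model id below is just an example; a CUDA GPU plus the `bitsandbytes` and
# `accelerate` packages are required at runtime.
def _four_bit_loading_example():
    from transformers import AutoModelForCausalLM

    quant_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
        bnb_4bit_use_double_quant=True,
    )
    return AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quant_config)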
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
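
# Editor's note: a minimal sketch of the dual audio/text path above; the
# silent audio is a stand-in, and the checkpoint id is just an example.
def _whisper_processor_example():
    import numpy as np

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    audio = np.zeros(16000, dtype=np.float32)  # one second of 16 kHz silence
    features = processor(audio=audio, sampling_rate=16000, return_tensors="pt")  # log-mel features
    labels = processor(text="hello world").input_ids  # tokenized transcript
    return features, labels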
'''simple docstring'''
import pprint
import requests
_SCREAMING_SNAKE_CASE = "https://zenquotes.io/api"
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of times the digits of `num` must be multiplied together
    before a single digit remains.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the number of times the digits of `num` must be summed before a
    single digit remains.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final():
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
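
    # Editor's note: the conversion above boils down to building (new_name,
    # old_name) pairs and copying tensors across; a standalone sketch of that
    # pattern (toy tensors, hypothetical names) is:
    def _remap_state_dict_sketch():
        original = {"stage0.patch_embed.proj.weight": torch.zeros(4, 3)}
        pairs = [
            (
                "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
                "stage0.patch_embed.proj.weight",
            )
        ]
        remapped = OrderedDict()
        for new_name, old_name in pairs:
            remapped[new_name] = original[old_name]
        return remapped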
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
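# A minimal cross-check sketch: the DP above can be verified against a direct
# recursion for small rows. `count_tilings` is a helper name introduced here
# purely for illustration; it is not part of the original solution.
def count_tilings(n: int) -> int:
    # A row of length n is empty, starts with a black unit square, or starts
    # with a coloured tile of length 2, 3 or 4.
    if n < 0:
        return 0
    if n == 0:
        return 1
    total = count_tilings(n - 1)
    for tile_length in range(2, 5):
        total += count_tilings(n - tile_length)
    return total

if __name__ == "__main__":
    assert all(solution(n) == count_tilings(n) for n in range(12))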
| 356 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_SCREAMING_SNAKE_CASE = {
"openbmb/cpm-ant-10b": 1024,
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> str:
snake_case = collections.OrderedDict()
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as reader:
snake_case = reader.readlines()
for index, token in enumerate(__lowerCAmelCase ):
snake_case = token.rstrip("""\n""" )
snake_case = index
return vocab
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]=2_00 )-> List[str]:
snake_case = vocab
snake_case = unk_token
snake_case = max_input_chars_per_word
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> List[Any]:
snake_case = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case = 0
snake_case = []
while start < len(__snake_case ):
snake_case = len(__snake_case )
snake_case = None
while start < end:
snake_case = """""".join(chars[start:end] )
if substr in self.vocab:
snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
snake_case = end
return sub_tokens
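# A standalone sketch of the greedy longest-match-first strategy implemented by
# the tokenizer above (the max_input_chars_per_word guard is omitted, and the
# vocabulary in the usage comment is made up purely for illustration):
def greedy_wordpiece(word: str, vocab: set, unk: str = "<unk>") -> list:
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        # Shrink the window from the right until the prefix is in the vocabulary.
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:  # nothing matched: emit the unk token and skip one character
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

# e.g. greedy_wordpiece("unhappy", {"un", "happy", "hap"}) -> ["un", "happy"]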
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
snake_case_ = False
def __init__( self : int , __snake_case : Tuple , __snake_case : Optional[int]="<d>" , __snake_case : int="</d>" , __snake_case : List[Any]="<s>" , __snake_case : List[str]="</s>" , __snake_case : str="<pad>" , __snake_case : Union[str, Any]="<unk>" , __snake_case : str="</n>" , __snake_case : List[str]="</_>" , __snake_case : Union[str, Any]="left" , **__snake_case : Tuple , )-> Union[str, Any]:
requires_backends(self , ["""jieba"""] )
super().__init__(
bod_token=__snake_case , eod_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , unk_token=__snake_case , line_token=__snake_case , space_token=__snake_case , padding_side=__snake_case , **__snake_case , )
snake_case = bod_token
snake_case = eod_token
snake_case = load_vocab(__snake_case )
snake_case = self.encoder[space_token]
snake_case = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
snake_case = {v: k for k, v in self.encoder.items()}
snake_case = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase ( self : Optional[int] )-> List[Any]:
return self.encoder[self.bod_token]
@property
def lowerCAmelCase ( self : str )-> Tuple:
return self.encoder[self.eod_token]
@property
def lowerCAmelCase ( self : str )-> List[str]:
return self.encoder["\n"]
@property
def lowerCAmelCase ( self : List[Any] )-> int:
return len(self.encoder )
def lowerCAmelCase ( self : Any )-> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __snake_case : Any )-> Union[str, Any]:
snake_case = []
for x in jieba.cut(__snake_case , cut_all=__snake_case ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__snake_case ) )
return output_tokens
def lowerCAmelCase ( self : str , __snake_case : Tuple , **__snake_case : Dict )-> Optional[int]:
snake_case = [i for i in token_ids if i >= 0]
snake_case = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__snake_case , **__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Dict )-> Optional[int]:
return token in self.encoder
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[str] )-> str:
return "".join(__snake_case )
def lowerCAmelCase ( self : Tuple , __snake_case : int )-> Optional[int]:
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : str , __snake_case : List[Any] )-> str:
return self.decoder.get(__snake_case , self.unk_token )
def lowerCAmelCase ( self : int , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
if os.path.isdir(__snake_case ):
snake_case = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
else:
snake_case = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
snake_case = 0
if " " in self.encoder:
snake_case = self.encoder[""" """]
del self.encoder[" "]
if "\n" in self.encoder:
snake_case = self.encoder["""\n"""]
del self.encoder["\n"]
        snake_case = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
snake_case = token_index
writer.write(token + """\n""" )
index += 1
return (vocab_file,)
def lowerCAmelCase ( self : Dict , __snake_case : List[int] , __snake_case : List[int] = None )-> List[int]:
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase ( self : str , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
if token_ids_a is not None:
return [1] + ([0] * len(__snake_case )) + [1] + ([0] * len(__snake_case ))
return [1] + ([0] * len(__snake_case ))
| 3 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = GPTaTokenizer
snake_case_ = GPTaTokenizerFast
snake_case_ = True
snake_case_ = {"add_prefix_space": True}
snake_case_ = False
def lowerCAmelCase ( self : Optional[int] )-> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
snake_case = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case = {'''unk_token''': '''<unk>'''}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase_ ) )
def lowerCAmelCase ( self : Dict , **__snake_case : Optional[int] )-> Tuple:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase_ )
def lowerCAmelCase ( self : List[str] , **__snake_case : str )-> Dict:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ )
def lowerCAmelCase ( self : Optional[Any] , __snake_case : List[Any] )-> Optional[int]:
snake_case = '''lower newer'''
snake_case = '''lower newer'''
return input_text, output_text
def lowerCAmelCase ( self : Optional[Any] )-> int:
snake_case = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case = '''lower newer'''
snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case = tokens + [tokenizer.unk_token]
snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def lowerCAmelCase ( self : Optional[Any] )-> int:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
snake_case = '''lower newer'''
# Testing tokenization
snake_case = tokenizer.tokenize(lowercase_ , add_prefix_space=lowercase_ )
snake_case = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids without special tokens
snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ , add_prefix_space=lowercase_ )
snake_case = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing conversion to ids with special tokens
snake_case = self.get_rust_tokenizer(add_prefix_space=lowercase_ )
snake_case = tokenizer.encode(lowercase_ , add_prefix_space=lowercase_ )
snake_case = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
# Testing the unknown token
snake_case = tokens + [rust_tokenizer.unk_token]
snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase_ ) , lowercase_ )
def lowerCAmelCase ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : Optional[Any] )-> Any:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Optional[Any]=15 )-> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
# Simple input
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case = ('''This is a simple input''', '''This is a pair''')
snake_case = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
# Simple input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
# Pair input
self.assertRaises(lowercase_ , tokenizer_r.encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowercase_ , tokenizer_r.batch_encode_plus , lowercase_ , max_length=lowercase_ , padding="""max_length""" , )
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
snake_case = ('''This is a simple input''', '''This is a pair''')
snake_case = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
snake_case = tokenizer.pad_token_id
snake_case = tokenizer(lowercase_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
snake_case = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="""np""" )
snake_case = tokenizer(*lowercase_ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
snake_case = tokenizer(lowercase_ , padding=lowercase_ , truncate=lowercase_ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def lowerCAmelCase ( self : Dict )-> List[str]:
snake_case = '''$$$'''
snake_case = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase_ , add_bos_token=lowercase_ )
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case = tokenizer.bos_token_id
snake_case = tokenizer(lowercase_ )
snake_case = tokenizer(lowercase_ )
self.assertEqual(out_s.input_ids[0] , lowercase_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case = tokenizer.decode(out_s.input_ids )
snake_case = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , lowercase_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowerCAmelCase ( self : Tuple )-> int:
pass
def lowerCAmelCase ( self : int )-> str:
# TODO: change to self.get_tokenizers() when the fast version is implemented
snake_case = [self.get_tokenizer(do_lower_case=lowercase_ , add_bos_token=lowercase_ )]
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case = '''Encode this.'''
snake_case = '''This one too please.'''
snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
encoded_sequence += tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
snake_case = tokenizer.encode_plus(
lowercase_ , lowercase_ , add_special_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , )
snake_case = encoded_sequence_dict['''input_ids''']
snake_case = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
snake_case = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase_ )
]
snake_case = [x for x in filtered_sequence if x is not None]
self.assertEqual(lowercase_ , lowercase_ )
@require_tokenizers
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> int:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
snake_case = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase_ )
snake_case = '''A photo of a cat'''
snake_case = tokenizer.encode(
lowercase_ , )
self.assertEqual(lowercase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("""test_opt""" )
snake_case = AutoTokenizer.from_pretrained("""./test_opt""" )
snake_case = tokenizer.encode(
lowercase_ , )
self.assertEqual(lowercase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
def lowerCAmelCase ( self : List[Any] )-> Union[str, Any]:
snake_case = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=lowercase_ )
snake_case = '''A photo of a cat'''
snake_case = tokenizer.encode(
lowercase_ , )
# Same as above
self.assertEqual(lowercase_ , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def lowerCAmelCase ( self : Any )-> Tuple:
snake_case = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=lowercase_ )
snake_case = '''bos'''
snake_case = tokenizer.get_vocab()['''bos''']
snake_case = '''A photo of a cat'''
snake_case = tokenizer.encode(
lowercase_ , )
# We changed the bos token
self.assertEqual(lowercase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained("""./tok""" )
snake_case = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
snake_case = tokenizer.encode(
lowercase_ , )
self.assertEqual(lowercase_ , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 357 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
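# A minimal follow-up sketch (illustrative, not part of the original script):
# score the fitted classifier directly on a held-out split, in addition to the
# confusion matrix plotted above.
def holdout_accuracy() -> float:
    from sklearn.metrics import accuracy_score

    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    classifier = xgboost(x_train, y_train)
    return accuracy_score(y_test, classifier.predict(x_test))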
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 0 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
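# A quick property check (illustrative; not part of the original module):
# intro sort should agree with Python's built-in sorted() on arbitrary data.
def _check_against_builtin(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-1000, 1000) for _ in range(random.randint(0, 200))]
        assert sort(list(data)) == sorted(data)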
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = input("Enter numbers separated by a comma : ").strip()
_SCREAMING_SNAKE_CASE = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
| 358 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape and return the headline counters from the worldometers coronavirus page."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 3 | 0 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an init file and parse (per backend) the _import_structure and TYPE_CHECKING objects defined."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(imports):
        return [k for k, v in collections.Counter(imports).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 359 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = CamembertTokenizer
snake_case_ = CamembertTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = """<pad>"""
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case ) , __snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case ) , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__snake_case ) , 10_04 )
def lowerCAmelCase ( self : List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = CamembertTokenizer(__snake_case )
tokenizer.save_pretrained(self.tmpdirname )
snake_case = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
snake_case = tokenizer.convert_ids_to_tokens(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowerCAmelCase ( self : str )-> Any:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = """I was born in 92000, and this is falsé."""
snake_case = tokenizer.tokenize(__snake_case )
snake_case = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
snake_case = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__snake_case )
snake_case = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
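# Worked example: factorial(10) == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27,
# so solution(10) returns 27; solution(100) gives the Project Euler 20 answer, 648.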
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 360 |
'''simple docstring'''
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
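def _usage_sketch() -> None:
    # A minimal usage illustration of the list above (not part of the original file).
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)         # list: 1 2 3
    linked_list.insert_at_position(2, 9)  # list: 1 9 2 3
    assert str(linked_list) == "1 9 2 3"
    linked_list.delete_value(9)
    assert str(linked_list) == "1 2 3"
    assert 2 in linked_list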
def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class _lowerCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
snake_case_ = """transfo-xl"""
snake_case_ = ["""mems"""]
snake_case_ = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : int , __snake_case : Tuple=26_77_35 , __snake_case : Tuple=[2_00_00, 4_00_00, 20_00_00] , __snake_case : List[Any]=10_24 , __snake_case : List[Any]=10_24 , __snake_case : List[Any]=16 , __snake_case : Optional[int]=64 , __snake_case : Dict=40_96 , __snake_case : Any=4 , __snake_case : int=False , __snake_case : Any=18 , __snake_case : List[str]=16_00 , __snake_case : int=10_00 , __snake_case : Optional[Any]=True , __snake_case : Union[str, Any]=True , __snake_case : List[Any]=0 , __snake_case : List[str]=-1 , __snake_case : int=True , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=0.0 , __snake_case : List[str]=True , __snake_case : Any="normal" , __snake_case : int=0.01 , __snake_case : List[Any]=0.01 , __snake_case : List[str]=0.02 , __snake_case : Tuple=1e-5 , __snake_case : Optional[Any]=0 , **__snake_case : Union[str, Any] , )-> Dict:
snake_case = vocab_size
snake_case = []
self.cutoffs.extend(snake_case__ )
if proj_share_all_but_first:
snake_case = [False] + [True] * len(self.cutoffs )
else:
snake_case = [False] + [False] * len(self.cutoffs )
snake_case = d_model
snake_case = d_embed
snake_case = d_head
snake_case = d_inner
snake_case = div_val
snake_case = pre_lnorm
snake_case = n_layer
snake_case = n_head
snake_case = mem_len
snake_case = same_length
snake_case = attn_type
snake_case = clamp_len
snake_case = sample_softmax
snake_case = adaptive
snake_case = dropout
snake_case = dropatt
snake_case = untie_r
snake_case = init
snake_case = init_range
snake_case = proj_init_std
snake_case = init_std
snake_case = layer_norm_epsilon
super().__init__(eos_token_id=snake_case__ , **snake_case__ )
@property
def lowerCAmelCase ( self : Tuple )-> List[Any]:
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def lowerCAmelCase ( self : List[Any] , __snake_case : Tuple )-> Union[str, Any]:
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 361 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __snake_case : Optional[int]=5_02_67 , __snake_case : List[Any]=10_24 , __snake_case : str=12 , __snake_case : Union[str, Any]=40_96 , __snake_case : List[Any]=16 , __snake_case : Tuple=12 , __snake_case : Tuple=40_96 , __snake_case : Union[str, Any]=16 , __snake_case : Any=0.0 , __snake_case : Dict=0.0 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=10_24 , __snake_case : int=0.1 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=0.02 , __snake_case : Any=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Tuple=1 , __snake_case : Tuple=0 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=True , __snake_case : Dict=2 , __snake_case : Any=2 , __snake_case : Any=False , __snake_case : Any=1_00 , __snake_case : Optional[Any]=8_00 , **__snake_case : List[Any] , )-> Optional[int]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case = use_prompt
snake_case = prompt_length
snake_case = prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 3 | 0 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarise a grayscale PIL image in place around its global mean intensity."""
    width, height = image.size  # PIL reports size as (width, height)
    pixels = image.load()

    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height

    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
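# An equivalent vectorised formulation (illustrative alternative, not part of the
# original): note it uses the exact float mean, so pixels sitting exactly at the
# floor-divided mean above may be classified differently.
def mean_threshold_np(image: Image) -> Image:
    import numpy as np

    arr = np.array(image)
    binary = (arr > arr.mean()) * 255
    return Image.fromarray(binary.astype("uint8"))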
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 362 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 | 0 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv so the training script sees its own arguments plus the core count.
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
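# Typical invocation (illustrative; the launcher file name and script flags are
# assumptions, mirroring the torch.distributed.launch style of usage):
#   python xla_spawn.py --num_cores=8 path/to/training_script.py --your-script-args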
if __name__ == "__main__":
main()
| 363 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, WavaVecaProcessor)
    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, WavaVecaProcessor)
    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = WavaVecaProcessor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, WavaVecaProcessor)
    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = WavaVecaProcessor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, WavaVecaProcessor)
    def test_processor_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, WavaVecaProcessor)
    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(WavaVecaConfig, WavaVecaProcessor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(WavaVecaFeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

        new_processor = WavaVecaProcessor.from_pretrained(f"{USER}/test-processor")
        for k, v in processor.feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_processor.feature_extractor, k))
        self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = WavaVecaProcessor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

        new_processor = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org")
        for k, v in processor.feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_processor.feature_extractor, k))
        self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

        new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
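# Example (illustrative values): allocation_num(100, 4) -> ['1-25', '26-50', '51-75', '76-100']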
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
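# Example: compute_bridges(get_demo_graph(0)) finds the bridges (2, 3), (3, 4) and (2, 5)
# (the exact list order depends on the DFS traversal).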
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # flag names below are assumed from the common TF tester conventions; they disable tests that
    # do not apply to this model
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 365 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
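# Keys above are fairseq parameter-name fragments; values are the corresponding Hugging Face SEW
# module paths ("*" is filled in with the layer index during conversion, see below).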
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=1_60_00,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
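# Example invocation (script name and paths hypothetical; flags match the argparse definitions below):
#   python convert_sew_checkpoint.py --checkpoint_path ./sew.pt \
#       --pytorch_dump_folder_path ./sew-hf --dict_path ./dict.ltr.txt --is_finetuned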
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 3 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from the PyTorch checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 1_00)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 1_00)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        # unpack the per-model conversion classes and shortcut maps
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 1_00)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 1_00)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
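# Example invocation (paths hypothetical; flags match the argparse definitions below):
#   python convert_pytorch_checkpoint_to_tf2.py --tf_dump_path ./tf_dumps \
#       --model_type bert --pytorch_checkpoint_path bert-base-uncased --compare_with_pt_model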
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaControlnetImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyVaaControlnetImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            """num_train_timesteps""": 10_00,
            """beta_schedule""": """linear""",
            """beta_start""": 0.0_00_85,
            """beta_end""": 0.0_12,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("""RGB""").resize((2_56, 2_56))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 10,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = """cpu"""

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class KandinskyVaaControlnetImgaImgPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"""
        )

        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png"""
        )
        init_image = init_image.resize((5_12, 5_12))

        hint = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png"""
        )
        hint = torch.from_numpy(np.array(hint)).float() / 2_55.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = """A robot, 4k photo"""

        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="""cpu""").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=1_00,
            height=5_12,
            width=5_12,
            strength=0.5,
            output_type="""np""",
        )
        image = output.images[0]

        assert image.shape == (5_12, 5_12, 3)

        assert_mean_pixel_difference(image, expected_image)
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 367 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
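# Example: neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] -> 10.0
# (the second return value is the full Neville table used for the computation).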
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __a ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "FlavaImageProcessor"
snake_case_ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Dict , __snake_case : Any=None , __snake_case : Dict=None , **__snake_case : Union[str, Any] )-> Optional[int]:
snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCamelCase__ , )
snake_case = kwargs.pop("""feature_extractor""" )
snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
snake_case = self.image_processor
def __call__( self : Dict , __snake_case : Optional[ImageInput] = None , __snake_case : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = False , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : List[str] , )-> Optional[int]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
snake_case = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
if images is not None:
snake_case = self.image_processor(
UpperCamelCase__ , return_image_mask=UpperCamelCase__ , return_codebook_pixels=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
if text is not None and images is not None:
encoding.update(UpperCamelCase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] , *__snake_case : Tuple , **__snake_case : int )-> Dict:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase ( self : str , *__snake_case : str , **__snake_case : Optional[int] )-> List[Any]:
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
    @property
    def lowerCAmelCase ( self : Union[str, Any] )-> Any:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def lowerCAmelCase ( self : Tuple )-> int:
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def lowerCAmelCase ( self : Dict )-> Tuple:
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
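# --- Editor's sketch: a minimal, hypothetical usage of the processor above.
# "facebook/flava-full" is an assumed checkpoint name; PIL is an extra dependency.
#
#   from transformers import FlavaProcessor
#   from PIL import Image
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=Image.open("cat.png"), text=["a photo of a cat"], return_tensors="pt")
#   # inputs combines tokenizer outputs (input_ids, attention_mask) with pixel_values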
| 368 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
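# --- Editor's note: with the graph above this prints ['c', 'd', 'e', 'b', 'a'].
# The DFS appends each vertex after its descendants, so the list is a reverse
# topological order; reverse it if edges should point from earlier to later items.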
| 3 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
_SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, """config.json""")) and os.path.isfile(
            os.path.join(dirpath, """config.json""")):
            os.remove(os.path.join(dirpath, """config.json"""))
        if os.path.exists(os.path.join(dirpath, """pytorch_model.bin""")) and os.path.isfile(
            os.path.join(dirpath, """pytorch_model.bin""")):
            os.remove(os.path.join(dirpath, """pytorch_model.bin"""))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) = 0
    return -plogp.sum(dim=-1)
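# --- Editor's worked example: for a uniform distribution over n outcomes the
# entropy is log(n), e.g. entropy(torch.full((4,), 0.25)) == -4 * 0.25 * log(0.25)
# = log(4) ~= 1.386 (natural log, matching torch.log above).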
def print_ad_tensor(tensor):
    """Print a 2D tensor with one row per layer."""
    logger.info("""lv, h >\t""" + """\t""".join(F'''{x + 1}''' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:.5f}''' for x in tensor[row].cpu().data))
        else:
            logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:d}''' for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Compute head attention entropy and head importance scores."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="""Iteration""", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
            tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("""Attention entropies""")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("""Head importance scores""")
        print_ad_tensor(head_importance)
    logger.info("""Head ranked by importance scores""")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device)
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
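# --- Editor's note on the technique above: with a differentiable mask m_h on each
# head's output, the importance of head h is the accumulated absolute gradient
# I_h = sum_batches |dL/dm_h| evaluated at m_h = 1, as in Michel et al. 2019
# ("Are Sixteen Heads Really Better than One?", http://arxiv.org/abs/1905.10650).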
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set some to zero) iteratively, based on the head importance scores."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("""Pruning: original score: %f, threshold: %f""", original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("""Inf""")
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print("""BREAK BY num_to_mask""")
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("""Heads to mask: %s""", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask)
        current_score = 1 / loss
        logger.info(
            """Masking: current score: %f, remaining heads %d (%.1f percents)""", current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 1_00, )
    logger.info("""Final head mask""")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, """head_mask.npy"""), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (actually remove the masked head weights) and measure the speedup."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask)
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        """Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""", original_num_params, pruned_num_params, pruned_num_params / original_num_params * 1_00, )
    logger.info("""Pruning: score with masking: %f score with pruning: %f""", score_masking, score_pruning)
    logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""", original_time / new_time * 1_00)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--data_dir""", default=None, type=str, required=True, help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""", )
    parser.add_argument(
        """--model_name_or_path""", default=None, type=str, required=True, help="""Path to pretrained model or model identifier from huggingface.co/models""", )
    parser.add_argument(
        """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model predictions and checkpoints will be written.""", )
    # Other parameters
    parser.add_argument(
        """--config_name""", default="""""", type=str, help="""Pretrained config name or path if not the same as model_name_or_path""", )
    parser.add_argument(
        """--tokenizer_name""", default="""""", type=str, help="""Pretrained tokenizer name or path if not the same as model_name_or_path""", )
    parser.add_argument(
        """--cache_dir""", default=None, type=str, help="""Where do you want to store the pre-trained models downloaded from s3""", )
    parser.add_argument(
        """--data_subset""", type=int, default=-1, help="""If > 0: limit the data to a subset of data_subset instances.""")
    parser.add_argument(
        """--overwrite_output_dir""", action="""store_true""", help="""Whether to overwrite data in output directory""")
    parser.add_argument(
        """--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
    parser.add_argument(
        """--dont_normalize_importance_by_layer""", action="""store_true""", help="""Don't normalize importance score by layers""")
    parser.add_argument(
        """--dont_normalize_global_importance""", action="""store_true""", help="""Don't normalize all importance scores between 0 and 1""", )
    parser.add_argument(
        """--try_masking""", action="""store_true""", help="""Whether to try to mask head until a threshold of accuracy.""")
    parser.add_argument(
        """--masking_threshold""", default=0.9, type=float, help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""", )
    parser.add_argument(
        """--masking_amount""", default=0.1, type=float, help="""Amount of heads to mask at each masking step.""")
    parser.add_argument("""--metric_name""", default="""acc""", type=str, help="""Metric to use for head masking.""")
    parser.add_argument(
        """--max_seq_length""", default=1_28, type=int, help=(
            """The maximum total input sequence length after WordPiece tokenization. \n"""
            """Sequences longer than this will be truncated, sequences shorter padded."""
        ), )
    parser.add_argument("""--batch_size""", default=1, type=int, help="""Batch size.""")
    parser.add_argument("""--seed""", type=int, default=42)
    parser.add_argument("""--local_rank""", type=int, default=-1, help="""local_rank for distributed training on gpus""")
    parser.add_argument("""--no_cuda""", action="""store_true""", help="""Whether not to use CUDA when available""")
    parser.add_argument("""--server_ip""", type=str, default="""""", help="""Can be used for distant debugging.""")
    parser.add_argument("""--server_port""", type=str, default="""""", help="""Can be used for distant debugging.""")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("""Waiting for debugger attach""")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("""cuda""", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="""nccl""")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, """run_args.bin"""))
    logger.info("""Training/evaluation parameters %s""", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 369 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( TestCasePlus ):
"""simple docstring"""
    def run_seqaseq_quick( self : int , distributed : List[str]=False , extra_args_str : List[Any]=None , predict_with_generate : Optional[int]=True , do_train : Any=True , do_eval : int=True , do_predict : Tuple=True , )-> Tuple:
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MBART_TINY , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , """trainer_state.json""" ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["""eval_bleu"""] , float )
            assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
    @require_torch_multi_gpu
    def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
        self.run_seqaseq_quick(distributed=False )
    @require_torch_multi_gpu
    def lowerCAmelCase ( self : str )-> List[Any]:
        self.run_seqaseq_quick(distributed=True )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
        self.run_seqaseq_quick(distributed=True , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
        self.run_seqaseq_quick(distributed=True , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
        self.run_seqaseq_quick(distributed=True , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=False )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
        self.run_seqaseq_quick(
            distributed=True , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=False )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
        self.run_seqaseq_quick(distributed=True , extra_args_str="""--fp16 --fp16_backend=apex""" )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def lowerCAmelCase ( self : List[str] , experiment_id : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
        data = experiments[experiment_id]
        kwargs = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
        logline = """Running training"""
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data["""extra_args_str"""] )
        n_matches = len(re.findall(logline , cl.err ) )
        self.assertEqual(n_matches , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim : str ) -> Tuple[int, float]:
            extra_args = """--skip_memory_metrics 0"""
            output_dir = self.run_trainer(
                max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , """trainer_state.json""" ) ).log_history
            gpu_peak_mem_mb = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
            loss = logs[0]["""train_loss"""]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
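        # (worked arithmetic: 25e6 params * (8 - 2) bytes = 150e6 bytes ~= 143 MiB,
        #  hence the 120MB lower bound after the safety margin)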
        expected_savings = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
            f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
            f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            loss_orig , loss_bnb , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer( self , max_len : int , model_name : str , num_train_epochs : int , learning_rate : float = 3e-3 , optim : str = "adafactor" , distributed : bool = False , extra_args_str : str = None , eval_steps : int = 0 , predict_with_generate : bool = True , do_train : bool = True , do_eval : bool = True , do_predict : bool = True , n_gpus_to_use : int = None , )-> Dict:
        data_dir = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f'''
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs )}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps )}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        '''.split()
        args_eval = f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        '''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
        else:
            testargs = ["""run_translation.py"""] + args
            with patch.object(sys , """argv""" , testargs ):
                main()
return output_dir
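    # --- Editor's sketch (hypothetical values): a minimal invocation of run_trainer ---
    #   output_dir = self.run_trainer(max_len=12, model_name=MBART_TINY, num_train_epochs=1, eval_steps=1)
    #   # trainer_state.json inside output_dir then holds the logged metrics checked above.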
| 3 | 0 |
'''simple docstring'''
import requests
_SCREAMING_SNAKE_CASE = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def __lowerCamelCase ( __lowerCAmelCase : str ) -> List[Any]:
snake_case = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
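# --- Editor's sketch: a hedged variant that checks the HTTP status before parsing;
# the error handling below is an assumption, not part of the original script.
#
#   response = requests.get(_NEWS_API + bbc_news_api_key, timeout=10)
#   response.raise_for_status()          # raises requests.HTTPError on 4xx/5xx
#   articles = response.json().get("articles", [])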
| 370 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
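# --- Editor's note: the "*" in a mapped key is a layer-index wildcard. For
# example, the fairseq name "encoder.layers.3.self_attn.k_proj.weight" matches
# the "self_attn.k_proj" entry and becomes "encoder.layers.3.attention.k_proj"
# (the trailing "weight" is handled separately as the weight_type).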
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
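# --- Editor's worked example: a fairseq name like "conv_layers.0.2.weight" splits
# into layer_id=0 and type_id=2, i.e. the layer norm (type_id 0 is the conv itself),
# so it lands in feature_extractor.conv_layers[0].layer_norm.weight above.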
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path )
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = HubertForCTC(config )
    else:
        hf_wavavec = HubertModel(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 0 |
'''simple docstring'''
def solution(n: int = 10_00) -> int:
    return sum(e for e in range(3 , n ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 371 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp ( self : Tuple )-> Optional[Any]:
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def lowerCAmelCase ( self : str )-> Any:
        image_processor = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.assertIsInstance(image_processor , CLIPImageProcessor )
    def lowerCAmelCase ( self : List[Any] )-> str:
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(config_tmpfile , """w""" ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname ) / """config.json"""
            json.dump(
                {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(config_tmpfile , """w""" ) )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def lowerCAmelCase ( self : Tuple )-> Optional[int]:
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            config_tmpfile = Path(tmpdirname ) / """config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            json.dump({"""model_type""": """clip"""} , open(config_tmpfile , """w""" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname ).to_dict()
            config_dict.pop("""image_processor_type""" )
            config = CLIPImageProcessor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue("""_processor_class""" not in dict_as_saved )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
            json.dump(
                {"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname )
            self.assertIsInstance(image_processor , CLIPImageProcessor )
    def lowerCAmelCase ( self : int )-> Dict:
        with self.assertRaisesRegex(
            EnvironmentError , """clip-base is not a local folder and is not a valid model identifier""" ):
            image_processor = AutoImageProcessor.from_pretrained("""clip-base""" )
    def lowerCAmelCase ( self : Tuple )-> int:
        with self.assertRaisesRegex(
            EnvironmentError , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
    def lowerCAmelCase ( self : str )-> Union[str, Any]:
        with self.assertRaisesRegex(
            EnvironmentError , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
            image_processor = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            image_processor = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=False )
        image_processor = AutoImageProcessor.from_pretrained(
            """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=True )
        self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir )
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            AutoImageProcessor.register(CustomConfig , CustomImageProcessor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoImageProcessor.register(CLIPConfig , CLIPImageProcessor )
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname ) / """preprocessor_config.json"""
                config_tmpfile = Path(tmpdirname ) / """config.json"""
                json.dump(
                    {"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(processor_tmpfile , """w""" ) , )
                json.dump({"""model_type""": """clip"""} , open(config_tmpfile , """w""" ) )
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname )
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir )
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_image_processor , CustomImageProcessor )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
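    # --- Editor's sketch of the registration pattern exercised above (names from
    # the test's own imports):
    #   AutoConfig.register("custom", CustomConfig)
    #   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    #   # AutoImageProcessor.from_pretrained(...) now resolves configs whose
    #   # model_type is "custom" to CustomImageProcessor.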
    def lowerCAmelCase ( self : Dict )-> Optional[int]:
        class NewImageProcessor ( CLIPImageProcessor ):
            is_local = True
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            AutoImageProcessor.register(CustomConfig , NewImageProcessor )
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=False )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                """hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=True )
            self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
            self.assertTrue(not hasattr(image_processor , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 0 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_SCREAMING_SNAKE_CASE = [{"type": "code", "content": INSTALL_CONTENT}]
_SCREAMING_SNAKE_CASE = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 350 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCAmelCase ( PipelineTool ):
"""simple docstring"""
snake_case_ = "Salesforce/blip-image-captioning-base"
snake_case_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case_ = "image_captioner"
    snake_case_ = AutoModelForVision2Seq
snake_case_ = ["image"]
snake_case_ = ["text"]
    def __init__( self : Tuple , *args : Optional[int] , **kwargs : Any )-> Optional[Any]:
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def lowerCAmelCase ( self : str , image : "Image" )-> int:
        return self.pre_processor(images=image , return_tensors="""pt""" )
    def lowerCAmelCase ( self : Any , inputs : List[str] )-> Union[str, Any]:
        return self.model.generate(**inputs )
    def lowerCAmelCase ( self : Union[str, Any] , outputs : Any )-> Dict:
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
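# --- Editor's sketch: PipelineTool instances are callable, so a hypothetical
# end-to-end use would be
#   from PIL import Image
#   tool = _lowerCAmelCase()                   # the captioning tool defined above
#   caption = tool(Image.open("photo.png"))    # encode -> generate -> decode
#   print(caption)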
| 3 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = VideoToVideoSDPipeline
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
snake_case_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
snake_case_ = PipelineTesterMixin.required_optional_params - {"latents"}
snake_case_ = False
# No `output_type`.
snake_case_ = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
    def get_dummy_components( self : Any )-> Optional[int]:
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs( self : Optional[Any] , device : List[str] , seed : Union[str, Any]=0 )-> Optional[int]:
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''video''': video,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def lowerCAmelCase ( self : int )-> List[str]:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase ( self : Tuple )-> Tuple:
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCAmelCase ( self : Optional[int] )-> Optional[Any]:
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def lowerCAmelCase ( self : int )-> str:
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
pass
def lowerCAmelCase ( self : Dict )-> List[Any]:
return super().test_progress_bar()
@slow
@skip_mps
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def lowerCAmelCase ( self : List[Any] )-> Dict:
        pipe = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.float16 )
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        video = torch.randn((1, 10, 3, 10_24, 5_76) , generator=generator )
        video = video.to("""cuda""" )
        prompt = '''Spiderman is surfing'''
        video_frames = pipe(prompt , video=video , generator=generator , num_inference_steps=3 , output_type="""pt""" ).frames
        expected_array = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : Any , parent : Optional[Any] , batch_size : List[Any]=7 , num_channels : Optional[Any]=3 , image_size : str=18 , min_resolution : Union[str, Any]=30 , max_resolution : Union[str, Any]=4_00 , do_resize : Optional[int]=True , size : Any=None , apply_ocr : List[str]=True , )-> Optional[Any]:
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp ( self : int )-> Tuple:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase ( self : Union[str, Any] )-> Any:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """apply_ocr""" ) )
    def lowerCAmelCase ( self : List[str] )-> List[Any]:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
    def lowerCAmelCase ( self : Tuple )-> Dict:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="""pt""" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    def test_call_numpy( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_call_pytorch( self ):
# Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
# Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
    def test_layoutlmv3_integration_test( self ):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
        image = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        encoding = image_processing(image , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
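A minimal usage sketch for the image processor exercised by the tests above (illustrative only; it assumes transformers with pytesseract installed, and LayoutLMv3ImageProcessor is the real class behind the snippet's LayoutLMvaImageProcessor alias; the blank image is a stand-in for a document scan):

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

image_processor = LayoutLMv3ImageProcessor(apply_ocr=True)
image = Image.new("RGB", (400, 300), color="white")  # stand-in document image
encoding = image_processor(image, return_tensors="pt")
# pixel_values are resized to 224x224; words/boxes come from the Tesseract OCR pass
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
print(encoding.words, encoding.boxes)  # empty lists for a blank page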
| 3 | 0 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = """T5Config"""
def shift_tokens_right( input_ids : jnp.array , pad_token_id : int , decoder_start_token_id : int ) -> jnp.ndarray:
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -1_00 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class FlaxMTaModel ( FlaxTaModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel ( FlaxTaEncoderModel ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration ( FlaxTaForConditionalGeneration ):
    """simple docstring"""
    model_type = "mt5"
    config_class = MTaConfig
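A quick illustration of the decoder-input shift implemented by shift_tokens_right above (a sketch, assuming jax is installed; the token ids are arbitrary):

import jax.numpy as jnp

labels = jnp.array([[42, -100, 44, 43]])  # -100 marks ignored label positions
shifted = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=0)
# every id moves one slot right, the start token fills slot 0,
# and any -100 sentinel carried over is replaced by the pad id
print(shifted)  # [[ 0 42  1 44]]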
| 352 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation( base_url : str , params : dict ) -> str:
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , """html.parser""" )
    div = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
    anchors = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 0 |
'''simple docstring'''
import os
_SCREAMING_SNAKE_CASE = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals( numerals : str ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals( num : int ) -> str:
    numerals = """"""
    m_count = num // 10_00
    numerals += m_count * "M"
    num %= 10_00
    c_count = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __lowerCamelCase ( __lowerCAmelCase : str = "/p089_roman.txt" ) -> List[Any]:
snake_case = 0
with open(os.path.dirname(__lowerCAmelCase ) + roman_numerals_filename ) as filea:
snake_case = filea.readlines()
for line in lines:
snake_case = line.strip()
snake_case = parse_roman_numerals(__lowerCAmelCase )
snake_case = generate_roman_numerals(__lowerCAmelCase )
savings += len(__lowerCAmelCase ) - len(__lowerCAmelCase )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 353 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class WhisperProcessor ( ProcessorMixin ):
    """simple docstring"""
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    def get_prompt_ids( self , text , return_tensors="""np""" ):
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
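A minimal sketch of how the processor ties its two components together (assuming the openai/whisper-tiny checkpoint is available; the silent clip is a stand-in for real audio):

import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, text="hey")
# audio went through the feature extractor, text through the tokenizer,
# and the token ids were attached as labels (the combined-call path above)
print(list(inputs.keys()))  # ['input_features', 'labels']
print(inputs["labels"])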
| 3 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester ( unittest.TestCase , ToolTesterMixin ):
    """simple docstring"""
    def setUp( self ):
        self.tool = load_tool("""text-to-speech""" )
        self.tool.setup()
    def test_exact_match_arg( self ):
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
    def test_exact_match_kwarg( self ):
        torch.manual_seed(0 )
        result = self.tool("""hey""" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0_00_59_66_66_88_32_11_58_29, -0.0_00_36_57_64_01_90_79_50_64, -0.00_01_34_39_50_27_99_88_34_85] ) , ) )
| 354 |
'''simple docstring'''
def multiplicative_persistence( num : int ) -> int:
    if not isinstance(num , int ):
        raise ValueError("""multiplicative_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 1
        for i in range(0 , len(numbers ) ):
            total *= numbers[i]
        num_string = str(total )
        steps += 1
    return steps
def additive_persistence( num : int ) -> int:
    if not isinstance(num , int ):
        raise ValueError("""additive_persistence() only accepts integral values""" )
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""" )
    steps = 0
    num_string = str(num )
    while len(num_string ) != 1:
        numbers = [int(i ) for i in num_string]
        total = 0
        for i in range(0 , len(numbers ) ):
            total += numbers[i]
        num_string = str(total )
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
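For instance, tracing both definitions by hand (illustrative):

# 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4: three steps, so persistence 3
assert multiplicative_persistence(39) == 3
# 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1: three steps as well
assert additive_persistence(199) == 3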
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid : Matrix , row : int , column : int , n : int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def find_empty_location( grid : Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku( grid : Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution( grid : Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 355 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention( idx , cnt ):
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token( idx ):
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final():
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    img_labels_file = """imagenet-1k-id2label.json"""
    num_labels = 10_00
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="""dataset""" ) ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("""/""" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [1_92, 7_68, 10_24]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
    image_processor.size["""shortest_edge"""] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("""cpu""" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab( vocab_file ):
    vocab = collections.OrderedDict()
    with open(vocab_file , """r""" , encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class WordpieceTokenizer ( object ):
    """simple docstring"""
    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=2_00 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize( self , token ):
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = """""".join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class CpmAntTokenizer ( PreTrainedTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__( self , vocab_file , bod_token="<d>" , eod_token="</d>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , line_token="</n>" , space_token="</_>" , padding_side="left" , **kwargs , ):
        requires_backends(self , ["""jieba"""] )
        super().__init__(
            bod_token=bod_token , eod_token=eod_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , unk_token=unk_token , line_token=line_token , space_token=space_token , padding_side=padding_side , **kwargs , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file )
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
    @property
    def bod_token_id( self ):
        return self.encoder[self.bod_token]
    @property
    def eod_token_id( self ):
        return self.encoder[self.eod_token]
    @property
    def newline_id( self ):
        return self.encoder["\n"]
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        output_tokens = []
        for x in jieba.cut(text , cut_all=False ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
        return output_tokens
    def _decode( self , token_ids , **kwargs ):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids , **kwargs )
    def check( self , token ):
        return token in self.encoder
    def convert_tokens_to_string( self , tokens ):
        return "".join(tokens )
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.decoder.get(index , self.unk_token )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None )-> Tuple[str]:
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        else:
            vocab_file = (filename_prefix + """-""" if filename_prefix else """""") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[""" """]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["""\n"""]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        with open(vocab_file , """w""" , encoding="""utf-8""" ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        """ Please check that the vocabulary is not corrupted!""" )
                    index = token_index
                writer.write(token + """\n""" )
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: List[int] = None )-> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False )-> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 ))
        return [1] + ([0] * len(token_ids_0 ))
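The greedy longest-match-first behavior of the WordpieceTokenizer above, shown with a toy vocabulary (the tokens are hypothetical; standalone):

toy_vocab = {"un": 0, "believ": 1, "able": 2, "unbeliev": 3}
wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="<unk>")
print(wp.tokenize("unbelievable"))  # ['unbeliev', 'able'] -- the longest known prefix wins
print(wp.tokenize("xyz"))           # ['<unk>', '<unk>', '<unk>'] -- one <unk> per unmatched char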
| 3 | 0 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""
def get_all_tweets( screen_name : str ) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=2_00 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F'''getting tweets before {oldest}''' )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=2_00 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F'''...{len(alltweets )} tweets downloaded so far''' )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F'''new_{screen_name}_tweets.csv''' , """w""" ) as f:
        writer = csv.writer(f )
        writer.writerow(["""id""", """created_at""", """text"""] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("FirePing32")
| 357 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data : dict ) -> tuple:
    return (data["data"], data["target"])
def xgboost( features : np.ndarray , target : np.ndarray ) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main() -> None:
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["""target_names"""]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="""Blues""" , normalize="""true""" , )
    plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_SCREAMING_SNAKE_CASE = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __lowerCamelCase ( __lowerCAmelCase : str = "https://www.worldometers.info/coronavirus" ) -> dict:
snake_case = BeautifulSoup(requests.get(__lowerCAmelCase ).text , """html.parser""" )
snake_case = soup.findAll("""h1""" )
snake_case = soup.findAll("""div""" , {"""class""": """maincounter-number"""} )
keys += soup.findAll("""span""" , {"""class""": """panel-title"""} )
values += soup.findAll("""div""" , {"""class""": """number-table-main"""} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowerCAmelCase , __lowerCAmelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 3 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , vqa_model=False , nlvr_model=False , irtr_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''transformer.blocks.{i}.norm1.weight''', F'''vilt.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm1.bias''', F'''vilt.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.weight''', F'''vilt.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''transformer.blocks.{i}.attn.proj.bias''', F'''vilt.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.weight''', F'''vilt.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.norm2.bias''', F'''vilt.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''transformer.blocks.{i}.mlp.fc1.weight''', F'''vilt.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc1.bias''', F'''vilt.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.weight''', F'''vilt.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''transformer.blocks.{i}.mlp.fc2.bias''', F'''vilt.encoder.layer.{i}.output.dense.bias''') )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def read_in_q_k_v( state_dict , config ):
    for i in range(config.num_hidden_layers ):
        prefix = """vilt."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''transformer.blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[prefix + F'''encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViltConfig(image_size=3_84 , patch_size=32 , tie_word_embeddings=False )
    mlm_model = False
    vqa_model = False
    nlvr_model = False
    irtr_model = False
    if "vqa" in checkpoint_url:
        vqa_model = True
        config.num_labels = 31_29
        repo_id = """huggingface/label-files"""
        filename = """vqa2-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        model = ViltForQuestionAnswering(config )
    elif "nlvr" in checkpoint_url:
        nlvr_model = True
        config.num_labels = 2
        config.id2label = {0: """False""", 1: """True"""}
        config.label2id = {v: k for k, v in config.id2label.items()}
        config.modality_type_vocab_size = 3
        model = ViltForImagesAndTextClassification(config )
    elif "irtr" in checkpoint_url:
        irtr_model = True
        model = ViltForImageAndTextRetrieval(config )
    elif "mlm_itm" in checkpoint_url:
        mlm_model = True
        model = ViltForMaskedLM(config )
    else:
        raise ValueError("""Unknown model type""" )
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""state_dict"""]
    rename_keys = create_rename_keys(config , vqa_model , nlvr_model , irtr_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config )
    if mlm_model or irtr_model:
        ignore_keys = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
        for k in ignore_keys:
            state_dict.pop(k , None )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(state_dict )
    # Define processor
    image_processor = ViltImageProcessor(size=3_84 )
    tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""" )
    processor = ViltProcessor(image_processor , tokenizer )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        image1 = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=True ).raw )
        image2 = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=True ).raw )
        text = (
            """The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
            """ standing."""
        )
        encoding_1 = processor(image1 , text , return_tensors="""pt""" )
        encoding_2 = processor(image2 , text , return_tensors="""pt""" )
        outputs = model(
            input_ids=encoding_1.input_ids , pixel_values=encoding_1.pixel_values , pixel_values_2=encoding_2.pixel_values , )
    else:
        image = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=True ).raw )
        if mlm_model:
            text = """a bunch of [MASK] laying on a [MASK]."""
        else:
            text = """How many cats are there?"""
        encoding = processor(image , text , return_tensors="""pt""" )
        outputs = model(**encoding )
    # Verify outputs
    if mlm_model:
        expected_shape = torch.Size([1, 11, 3_05_22] )
        expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , expected_slice , atol=1e-4 )
        # verify masked token prediction equals "cats"
        predicted_id = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        expected_shape = torch.Size([1, 31_29] )
        expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
        # verify vqa prediction equals "2"
        predicted_idx = outputs.logits.argmax(-1 ).item()
        assert model.config.id2label[predicted_idx] == "2"
    elif nlvr_model:
        expected_shape = torch.Size([1, 2] )
        expected_slice = torch.tensor([-2.8721, 2.1291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 359 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(vocab_keys ) , 10_04 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_05 )
    def test_rust_and_python_bpe_tokenizers( self ):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
        sequence = """I was born in 92000, and this is falsé."""
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def lowerCAmelCase ( self : Any )-> Optional[int]:
# fmt: off
snake_case = {"""input_ids""": [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
snake_case = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__snake_case , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__snake_case , )
| 3 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BarthezTokenizer
snake_case_ = BarthezTokenizerFast
snake_case_ = True
snake_case_ = True
def lowerCAmelCase ( self : Any )-> Optional[Any]:
super().setUp()
snake_case = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__A )
snake_case = tokenizer
def lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
snake_case = '''<pad>'''
snake_case = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def lowerCAmelCase ( self : List[str] )-> List[str]:
snake_case = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__A ) , 10_11_22 )
def lowerCAmelCase ( self : Dict )-> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def lowerCAmelCase ( self : Optional[int] )-> Dict:
snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
snake_case = [0, 57, 30_18, 7_03_07, 91, 2]
snake_case = self.tokenizer(
__A , max_length=len(__A ) , padding=__A , truncation=__A , return_tensors="""pt""" )
self.assertIsInstance(__A , __A )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(__A , __A )
def lowerCAmelCase ( self : str )-> Dict:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = '''I was born in 92000, and this is falsé.'''
snake_case = tokenizer.tokenize(__A )
snake_case = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
snake_case = tokenizer.encode(__A , add_special_tokens=__A )
snake_case = rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
snake_case = self.get_rust_tokenizer()
snake_case = tokenizer.encode(__A )
snake_case = rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
@slow
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# fmt: off
snake_case = {'''input_ids''': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
snake_case = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__A , )
| 360 |
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : int=None )-> str:
snake_case = data
snake_case = previous
snake_case = next_node
def __str__( self : Union[str, Any] )-> str:
return f'''{self.data}'''
def lowerCAmelCase ( self : Tuple )-> int:
return self.data
def lowerCAmelCase ( self : str )-> str:
return self.next
def lowerCAmelCase ( self : Dict )-> Optional[int]:
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , __snake_case : List[Any] )-> List[str]:
snake_case = head
def __iter__( self : Optional[int] )-> Dict:
return self
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
if not self.current:
raise StopIteration
else:
snake_case = self.current.get_data()
snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] )-> str:
snake_case = None # First node in list
snake_case = None # Last node in list
def __str__( self : List[str] )-> Any:
snake_case = self.head
snake_case = []
while current is not None:
nodes.append(current.get_data() )
snake_case = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : Optional[Any] , __snake_case : int )-> Optional[Any]:
snake_case = self.head
while current:
if current.get_data() == value:
return True
snake_case = current.get_next()
return False
def __iter__( self : Dict )-> List[Any]:
return LinkedListIterator(self.head )
def lowerCAmelCase ( self : Tuple )-> int:
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase ( self : List[Any] , __snake_case : Node )-> None:
if self.head is None:
snake_case = node
snake_case = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase ( self : int , __snake_case : Node )-> None:
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> None:
snake_case = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.previous
if node.get_previous() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : Optional[int] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.next
if node.get_next() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : int )-> None:
snake_case = 1
snake_case = Node(__snake_case )
snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
snake_case = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> Node:
snake_case = self.head
while node:
if node.get_data() == item:
return node
snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase ( self : Any , __snake_case : Dict )-> Tuple:
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
snake_case = self.head.get_next()
if node == self.tail:
snake_case = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Node )-> None:
if node.get_next():
snake_case = node.previous
if node.get_previous():
snake_case = node.next
snake_case = None
snake_case = None
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
return self.head is None
def __lowerCamelCase ( ) -> None:
pass
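# A minimal, readable sketch of the doubly linked list above, using hypothetical
# de-obfuscated names (PlainNode/PlainLinkedList are assumptions, not the
# original identifiers). It shows the same previous/next wiring in runnable form.
class PlainNode:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node


class PlainLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def append(self, data):
        # New node points back at the old tail; the old tail points forward to it.
        node = PlainNode(data, previous=self.tail)
        if self.tail:
            self.tail.next = node
        else:
            self.head = node
        self.tail = node

    def __iter__(self):
        current = self.head
        while current:
            yield current.data
            current = current.next


# Usage: build 1 <-> 2 <-> 3 and traverse it front to back.
_demo = PlainLinkedList()
for _value in (1, 2, 3):
    _demo.append(_value)
assert list(_demo) == [1, 2, 3]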
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
from manim import *
class _lowerCAmelCase ( Scene ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = Rectangle(height=0.5 , width=0.5 )
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case = [mem.copy() for i in range(6 )]
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = VGroup(__a , __a ).arrange(__a , buff=0 )
snake_case = Text("""CPU""" , font_size=24 )
snake_case = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
snake_case = [mem.copy() for i in range(1 )]
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = Text("""GPU""" , font_size=24 )
snake_case = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.align_to(__a , __a )
gpu.set_x(gpu.get_x() - 1 )
self.add(__a )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__a ).arrange(__a , buff=0 )
snake_case = Text("""Model""" , font_size=24 )
snake_case = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.play(
Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , )
snake_case = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=2.5 ) , Write(__a ) , Write(__a ) )
self.add(__a )
snake_case = []
snake_case = []
snake_case = []
for i, rect in enumerate(__a ):
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
cpu_target.move_to(__a )
cpu_target.generate_target()
snake_case = 0.46 / 4
snake_case = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=__a , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__a , buff=0.0 )
cpu_targs.append(__a )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__a ) )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
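# Rendering note (hedged): with manim installed, a scene like the one above is
# rendered from the command line, e.g. `manim -pql this_file.py <SceneClassName>`,
# where -p previews the output and -ql selects low quality for fast iteration.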
| 361 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class _lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
snake_case_ = "mvp"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __snake_case : Optional[int]=5_02_67 , __snake_case : List[Any]=10_24 , __snake_case : str=12 , __snake_case : Union[str, Any]=40_96 , __snake_case : List[Any]=16 , __snake_case : Tuple=12 , __snake_case : Tuple=40_96 , __snake_case : Union[str, Any]=16 , __snake_case : Any=0.0 , __snake_case : Dict=0.0 , __snake_case : List[Any]="gelu" , __snake_case : Tuple=10_24 , __snake_case : int=0.1 , __snake_case : Any=0.0 , __snake_case : List[str]=0.0 , __snake_case : Dict=0.02 , __snake_case : Any=0.0 , __snake_case : Optional[int]=False , __snake_case : List[str]=True , __snake_case : Tuple=1 , __snake_case : Tuple=0 , __snake_case : List[str]=2 , __snake_case : Optional[Any]=True , __snake_case : Dict=2 , __snake_case : Any=2 , __snake_case : Any=False , __snake_case : Any=1_00 , __snake_case : Optional[Any]=8_00 , **__snake_case : List[Any] , )-> Optional[int]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = classifier_dropout
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case = use_prompt
snake_case = prompt_length
snake_case = prompt_mid_dim
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , )
if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , __snake_case ):
snake_case = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"""The config can simply be saved and uploaded again to be fixed.""" )
| 3 | 0 |
'''simple docstring'''
from functools import lru_cache
@lru_cache
def __lowerCamelCase ( __lowerCAmelCase : int ) -> int:
    if __lowerCAmelCase < 0:
        raise ValueError("""Number should not be negative.""" )
    return 1 if __lowerCAmelCase in (0, 1) else __lowerCAmelCase * __lowerCamelCase(__lowerCAmelCase - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
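    # A quick, hedged sanity check for the memoized factorial above; thanks to
    # @lru_cache, repeated calls reuse cached results instead of recursing again.
    assert __lowerCamelCase(0 ) == 1
    assert __lowerCamelCase(5 ) == 1_20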
| 362 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
# A mock response for an HTTP head request to emulate server down
snake_case = mock.Mock()
snake_case = 5_00
snake_case = {}
snake_case = HTTPError
snake_case = {}
# Download this model to make sure it's in the cache.
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__snake_case ) as mock_head:
snake_case = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
snake_case = ViTImageProcessor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" )
def lowerCAmelCase ( self : Union[str, Any] )-> str:
with self.assertRaises(__snake_case ):
# config is in subfolder, the following should not work without specifying the subfolder
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" )
self.assertIsNotNone(__snake_case )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def __lowerCamelCase ( __lowerCAmelCase : Any ) -> Optional[Any]:
    # encoder.embeddings are copied twice in the original FLAVA checkpoint, so they are skipped in the sum
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in __lowerCAmelCase.items() )
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict ) -> Optional[int]:
snake_case = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
snake_case = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
snake_case = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
snake_case = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
snake_case = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
snake_case = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
snake_case = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
snake_case = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
snake_case = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
snake_case = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
snake_case = key.replace("""image_encoder.module""" , """flava.image_model""" )
snake_case = key.replace("""text_encoder.module""" , """flava.text_model""" )
snake_case = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
snake_case = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
snake_case = key.replace("""text_projection""" , """flava.text_projection""" )
snake_case = key.replace("""image_projection""" , """flava.image_projection""" )
snake_case = value.float()
for key, value in codebook_state_dict.items():
snake_case = value
return upgrade
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : int=None ) -> Optional[Any]:
if config_path is not None:
snake_case = FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE_ )
else:
snake_case = FlavaConfig()
snake_case = FlavaForPreTraining(SCREAMING_SNAKE_CASE_ ).eval()
snake_case = convert_dalle_checkpoint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , save_checkpoint=SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ):
snake_case = torch.load(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
else:
snake_case = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" )
snake_case = upgrade_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
hf_model.load_state_dict(SCREAMING_SNAKE_CASE_ )
snake_case = hf_model.state_dict()
snake_case = count_parameters(SCREAMING_SNAKE_CASE_ )
snake_case = count_parameters(SCREAMING_SNAKE_CASE_ ) + count_parameters(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1e-3 )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
_SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_flava_checkpoint(_SCREAMING_SNAKE_CASE.checkpoint_path, _SCREAMING_SNAKE_CASE.codebook_path, _SCREAMING_SNAKE_CASE.pytorch_dump_folder_path, _SCREAMING_SNAKE_CASE.config_path)
| 363 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write("""{}""" )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
        class _lowerCAmelCase ( WavaVecaFeatureExtractor ):
"""simple docstring"""
snake_case_ = False
        class _lowerCAmelCase ( BertTokenizer ):
"""simple docstring"""
snake_case_ = False
        class _lowerCAmelCase ( ProcessorMixin ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 0 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _lowerCAmelCase ( XLMRobertaConfig ):
"""simple docstring"""
snake_case_ = "M-CLIP"
def __init__( self : List[str] , __snake_case : List[Any]=10_24 , __snake_case : Optional[Any]=7_68 , **__snake_case : Tuple )-> Dict:
snake_case = transformerDimSize
snake_case = imageDimSize
super().__init__(**SCREAMING_SNAKE_CASE_ )
class _lowerCAmelCase ( PreTrainedModel ):
"""simple docstring"""
snake_case_ = MCLIPConfig
def __init__( self : Dict , __snake_case : Union[str, Any] , *__snake_case : int , **__snake_case : List[Any] )-> Optional[Any]:
super().__init__(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
snake_case = XLMRobertaModel(SCREAMING_SNAKE_CASE_ )
snake_case = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase ( self : Dict , __snake_case : Any , __snake_case : List[Any] )-> Tuple:
snake_case = self.transformer(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
snake_case = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(SCREAMING_SNAKE_CASE_ ), embs
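# A hedged usage sketch for the text encoder above: tokenize with a matching
# XLM-R tokenizer, then the forward pass returns the CLIP-space projection
# alongside the mean-pooled transformer embedding. The checkpoint name is
# illustrative, not taken from this file.
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
# batch = tokenizer(["une photo d'un chat"], return_tensors="pt")
# projected, pooled = model(batch["input_ids"], batch["attention_mask"])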
| 364 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Optional[Any]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def __lowerCamelCase ( __lowerCAmelCase : dict[int, list[int]] ) -> list[tuple[int, int]]:
snake_case = 0
snake_case = len(__lowerCAmelCase ) # No of vertices in graph
snake_case = [0] * n
snake_case = [False] * n
def dfs(__lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ):
snake_case = True
snake_case = id_
id_ += 1
for to in graph[at]:
if to == parent:
pass
elif not visited[to]:
dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , id_ )
snake_case = min(low[at] , low[to] )
if id_ <= low[to]:
bridges.append((at, to) if at < to else (to, at) )
else:
# This edge is a back edge and cannot be a bridge
snake_case = min(low[at] , low[to] )
snake_case = []
for i in range(__lowerCAmelCase ):
if not visited[i]:
dfs(__lowerCAmelCase , -1 , __lowerCAmelCase , id_ )
return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
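    # Worked example (hedged, using assumed readable names for the obfuscated
    # helpers above): on fixture graph 0, removing any of the edges (3, 4),
    # (2, 3) or (2, 5) disconnects the graph, so the bridge finder returns
    # exactly [(3, 4), (2, 3), (2, 5)], in DFS discovery order.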
| 3 | 0 |
'''simple docstring'''
import re
def __lowerCamelCase ( __lowerCAmelCase : str ) -> bool:
snake_case = re.compile(
r"""^(?:0|94|\+94|0{2}94)""" r"""7(0|1|2|4|5|6|7|8)""" r"""(-| |)""" r"""\d{7}$""" )
    return bool(re.search(snake_case , __lowerCAmelCase ) )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = "0094702343221"
    print(__lowerCamelCase(_SCREAMING_SNAKE_CASE ) )
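    # Hedged examples for the validator above: "+94", "0" and "0094" prefixes are
    # accepted, and the digit after the leading 7 must be one of 0,1,2,4,5,6,7,8.
    assert __lowerCamelCase("""+94702343221""" )
    assert not __lowerCamelCase("""0799999999""" )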
| 365 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> int:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ) -> List[str]:
snake_case = SEWConfig()
if is_finetuned:
snake_case = model.wav_encoder.wav_model.cfg
else:
snake_case = model.cfg
snake_case = fs_config.conv_bias
snake_case = eval(fs_config.conv_feature_layers )
snake_case = [x[0] for x in conv_layers]
snake_case = [x[1] for x in conv_layers]
snake_case = [x[2] for x in conv_layers]
snake_case = """gelu"""
snake_case = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
snake_case = 0.0
snake_case = fs_config.activation_fn.name
snake_case = fs_config.encoder_embed_dim
snake_case = 0.02
snake_case = fs_config.encoder_ffn_embed_dim
snake_case = 1e-5
snake_case = fs_config.encoder_layerdrop
snake_case = fs_config.encoder_attention_heads
snake_case = fs_config.conv_pos_groups
snake_case = fs_config.conv_pos
snake_case = len(__lowerCAmelCase )
snake_case = fs_config.encoder_layers
snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case = model.cfg
snake_case = fs_config.final_dropout
snake_case = fs_config.layerdrop
snake_case = fs_config.activation_dropout
snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case = fs_config.attention_dropout
snake_case = fs_config.dropout_input
snake_case = fs_config.dropout
snake_case = fs_config.mask_channel_length
snake_case = fs_config.mask_channel_prob
snake_case = fs_config.mask_length
snake_case = fs_config.mask_prob
snake_case = """Wav2Vec2FeatureExtractor"""
snake_case = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : str=True ) -> Any:
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = convert_config(model[0] , __lowerCAmelCase )
snake_case = model[0].eval()
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
            # Important: change the bos & pad token ids, since the CTC blank symbol is
            # <pad> and not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = SEWForCTC(__lowerCAmelCase )
else:
snake_case = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_sew_checkpoint(
        _SCREAMING_SNAKE_CASE.checkpoint_path, _SCREAMING_SNAKE_CASE.pytorch_dump_folder_path, _SCREAMING_SNAKE_CASE.config_path, _SCREAMING_SNAKE_CASE.dict_path, _SCREAMING_SNAKE_CASE.is_finetuned
    )
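# Hedged CLI sketch for the converter above (every path is a placeholder):
#   python convert_sew_checkpoint.py \
#     --checkpoint_path ./sew.pt \
#     --dict_path ./dict.ltr.txt \
#     --config_path ./config.json \
#     --pytorch_dump_folder_path ./sew-hf \
#     --is_finetuned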
| 3 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure)
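# A short, hedged note on the _LazyModule pattern above: the package module is
# replaced by a lazy proxy, so heavy submodules load only on first attribute
# access. Illustrative usage:
# from transformers.models import plbart  # cheap, nothing heavy imported yet
# plbart.PLBartModel                      # the real modeling import happens here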
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaControlnetImgaImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 3 | 0 |
'''simple docstring'''
def rank_of_matrix ( matrix : list[list[int | float]] ) -> int:
    """Return the rank of `matrix` via in-place Gaussian elimination."""
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below to swap rows with
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                # No pivot in this column: fold the last column in and shrink the rank
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row so the (possibly new) pivot is re-examined
return rank
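# Minimal usage sketch (hypothetical values): note the function eliminates in place,
# so pass a copy if the original matrix is still needed.
#
#   example = [[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]]   # second row is twice the first
#   print(rank_of_matrix(example))                  # prints 1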
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367 |
'''simple docstring'''
def neville_interpolate ( x_points : list , y_points : list , xa : int ) -> list:
    n = len(x_points )
    # q[j][i] holds the value interpolated at `xa` from the points j - i + 1 .. j
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
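# Minimal usage sketch (hypothetical points): the samples lie on y = x**2, so the
# polynomial interpolation at x = 3 recovers the exact value.
#
#   value, table = neville_interpolate([1, 2, 4, 5], [1, 4, 16, 25], 3)
#   print(value)   # prints 9.0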
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
_SCREAMING_SNAKE_CASE = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRContextEncoderTokenizer
class _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = DPRQuestionEncoderTokenizer
_SCREAMING_SNAKE_CASE = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
_SCREAMING_SNAKE_CASE = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
_SCREAMING_SNAKE_CASE = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(_UpperCamelCase )
class _lowerCAmelCase :
"""simple docstring"""
def __call__( self : Any , __snake_case : Optional[int] , __snake_case : Optional[str] = None , __snake_case : Optional[str] = None , __snake_case : Union[bool, str] = False , __snake_case : Union[bool, str] = False , __snake_case : Optional[int] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[bool] = None , **__snake_case : List[Any] , )-> str:
if titles is None and texts is None:
return super().__call__(
_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
elif titles is None or texts is None:
snake_case = titles if texts is None else texts
return super().__call__(
_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , )
snake_case = titles if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [titles]
snake_case = texts if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [texts]
snake_case = len(_UpperCAmelCase )
snake_case = questions if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) else [questions] * n_passages
        assert len(_UpperCAmelCase ) == len(
            _UpperCAmelCase ), f'''There should be as many titles as texts but got {len(_UpperCAmelCase )} titles and {len(_UpperCAmelCase )} texts.'''
snake_case = super().__call__(_UpperCAmelCase , _UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )['input_ids']
snake_case = super().__call__(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )['input_ids']
snake_case = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_UpperCAmelCase , _UpperCAmelCase )
]
}
if return_attention_mask is not False:
snake_case = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
snake_case = attention_mask
return self.pad(_UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
def lowerCAmelCase ( self : Any , __snake_case : BatchEncoding , __snake_case : DPRReaderOutput , __snake_case : int = 16 , __snake_case : int = 64 , __snake_case : int = 4 , )-> List[Any]:
snake_case = reader_input['input_ids']
snake_case = reader_output[:3]
snake_case = len(_UpperCAmelCase )
snake_case = sorted(range(_UpperCAmelCase ) , reverse=_UpperCAmelCase , key=relevance_logits.__getitem__ )
snake_case = []
for doc_id in sorted_docs:
snake_case = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
snake_case = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
snake_case = sequence_ids.index(self.pad_token_id )
else:
snake_case = len(_UpperCAmelCase )
snake_case = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_UpperCAmelCase , top_spans=_UpperCAmelCase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_UpperCAmelCase , start_index=_UpperCAmelCase , end_index=_UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_UpperCAmelCase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCAmelCase ( self : Any , __snake_case : List[int] , __snake_case : List[int] , __snake_case : int , __snake_case : int , )-> Union[str, Any]:
snake_case = []
for start_index, start_score in enumerate(_UpperCAmelCase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
snake_case = sorted(_UpperCAmelCase , key=lambda __snake_case : x[1] , reverse=_UpperCAmelCase )
snake_case = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
snake_case = end_index - start_index + 1
assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_UpperCAmelCase ) == top_spans:
break
return chosen_span_intervals
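# A minimal standalone sketch of the span-selection logic above (not the library API;
# the function name and toy logits are illustrative only): score every window up to
# max_answer_length, then greedily keep the highest-scoring spans, rejecting any span
# nested inside (or containing) one already kept -- partial overlaps are allowed,
# mirroring the containment-only check in the method above.
def _best_spans_sketch(start_logits, end_logits, max_answer_length=3, top_spans=2):
    # score each candidate (start, end) pair by summing its boundary logits
    scores = [
        ((start, start + length), start_logits[start] + end_logits[start + length])
        for start in range(len(start_logits))
        for length in range(min(max_answer_length, len(end_logits) - start))
    ]
    chosen = []
    for (start, end), _ in sorted(scores, key=lambda item: item[1], reverse=True):
        # skip spans nested inside, or fully containing, an already chosen span
        if any(ps <= start <= end <= pe or start <= ps <= pe <= end for ps, pe in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


# e.g. _best_spans_sketch([0.1, 2.0, 0.3], [0.2, 1.5, 2.5]) returns [(1, 2), (0, 1)]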
@add_end_docstrings(_UpperCamelCase )
class _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = READER_PRETRAINED_VOCAB_FILES_MAP
snake_case_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = READER_PRETRAINED_INIT_CONFIGURATION
snake_case_ = ['input_ids', 'attention_mask']
snake_case_ = DPRReaderTokenizer
| 368 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
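# Traced result for the graph above: a vertex is appended only after all of its
# descendants, so the returned list is in *reverse* topological order,
# ['c', 'd', 'e', 'b', 'a']; reverse it to get a parents-first ordering.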
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort)
| 3 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowerCAmelCase ( __snake_case ):
"""simple docstring"""
snake_case_ = """blenderbot-small"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[int] , __snake_case : str=5_02_65 , __snake_case : Optional[int]=5_12 , __snake_case : Any=8 , __snake_case : Any=20_48 , __snake_case : Union[str, Any]=16 , __snake_case : Tuple=8 , __snake_case : List[str]=20_48 , __snake_case : Tuple=16 , __snake_case : int=0.0 , __snake_case : Optional[Any]=0.0 , __snake_case : Any=True , __snake_case : Dict=True , __snake_case : str="gelu" , __snake_case : Union[str, Any]=5_12 , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.0 , __snake_case : List[str]=0.0 , __snake_case : int=0.02 , __snake_case : Dict=1 , __snake_case : Dict=False , __snake_case : Optional[Any]=0 , __snake_case : int=1 , __snake_case : Any=2 , __snake_case : str=2 , **__snake_case : Tuple , )-> Any:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = d_model
snake_case = encoder_ffn_dim
snake_case = encoder_layers
snake_case = encoder_attention_heads
snake_case = decoder_ffn_dim
snake_case = decoder_layers
snake_case = decoder_attention_heads
snake_case = dropout
snake_case = attention_dropout
snake_case = activation_dropout
snake_case = activation_function
snake_case = init_std
snake_case = encoder_layerdrop
snake_case = decoder_layerdrop
snake_case = use_cache
snake_case = encoder_layers
snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _lowerCAmelCase ( __snake_case ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : str )-> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case = {0: "batch"}
snake_case = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
snake_case = {0: "batch", 1: "decoder_sequence"}
snake_case = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
snake_case = self.num_layers
for i in range(UpperCamelCase__ ):
snake_case = {0: "batch", 2: "past_sequence + sequence"}
snake_case = {0: "batch", 2: "past_sequence + sequence"}
else:
snake_case = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
def lowerCAmelCase ( self : List[str] )-> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
snake_case = super().outputs
else:
snake_case = super(UpperCamelCase__ , self ).outputs
if self.use_past:
snake_case = self.num_layers
for i in range(UpperCamelCase__ ):
snake_case = {0: "batch", 2: "past_sequence + sequence"}
snake_case = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def lowerCAmelCase ( self : List[Any] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , )-> Mapping[str, Any]:
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
snake_case = seq_length if not self.use_past else 1
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
snake_case = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case = common_inputs["input_ids"].shape
snake_case = common_inputs["decoder_input_ids"].shape[1]
snake_case = self.num_attention_heads
snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case = decoder_seq_length + 3
snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case = self.num_layers
snake_case = min(UpperCamelCase__ , UpperCamelCase__ )
snake_case = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
snake_case = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
snake_case = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , )-> Mapping[str, Any]:
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case = seqlen + 2
snake_case = self.num_layers
snake_case = self.num_attention_heads
snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case = common_inputs["attention_mask"].dtype
snake_case = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
snake_case = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def lowerCAmelCase ( self : Tuple , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , )-> Mapping[str, Any]:
snake_case = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
snake_case = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
snake_case = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def lowerCAmelCase ( self : Optional[int] , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , )-> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
snake_case = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
snake_case = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def lowerCAmelCase ( self : str , __snake_case : str , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Optional[int] )-> List[Any]:
if self.task in ["default", "seq2seq-lm"]:
snake_case = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
snake_case = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
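# Shape sketch for the zero-filled past_key_values entries built above (hypothetical
# sizes): with batch=2, encoder_attention_heads=16, d_model=512 and a past length of 8,
# each cached key/value tensor has shape (batch, heads, past_len, head_dim) =
# (2, 16, 8, 512 // 16) == (2, 16, 8, 32).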
| 369 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple:
snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
snake_case = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict:
snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case = self.get_auto_remove_tmp_dir()
snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ["""run_translation.py"""] + args
with patch.object(__snake_case , """argv""" , __snake_case ):
main()
return output_dir
| 3 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def create_rename_keys ( encoder_config , decoder_config ):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v ( state_dict , encoder_config ):
    for i in range(encoder_config.num_hidden_layers ):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f'''encoder.deit.blocks.{i}.attn.qkv.weight''' )
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f'''encoder.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
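# Minimal usage sketch (hypothetical state dict entry): create_rename_keys produces
# (old, new) pairs and rename_key moves one entry at a time.
#
#   state = {"encoder.deit.norm.weight": torch.ones(1)}
#   rename_key(state, "encoder.deit.norm.weight", "encoder.layernorm.weight")
#   # state == {"encoder.layernorm.weight": tensor([1.])}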
def prepare_img ( checkpoint_url ):
    if "handwritten" in checkpoint_url:
        url = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint ( checkpoint_url , pytorch_dump_folder_path ):
    encoder_config = ViTConfig(image_size=3_84 , qkv_bias=False )
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 7_68
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 10_24
        encoder_config.intermediate_size = 40_96
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 10_24
    else:
        raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = """relu"""
        decoder_config.max_position_embeddings = 10_24
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config , add_pooling_layer=False )
    decoder = TrOCRForCausalLM(decoder_config )
    model = VisionEncoderDecoderModel(encoder=encoder , decoder=decoder )
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , check_hash=True )["""model"""]
    rename_keys = create_rename_keys(encoder_config , decoder_config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , encoder_config )
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""decoder""" ) and "output_projection" not in key:
            state_dict["""decoder.model.""" + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size )
    tokenizer = RobertaTokenizer.from_pretrained("""roberta-large""" )
    processor = TrOCRProcessor(image_processor , tokenizer )
    pixel_values = processor(images=prepare_img(checkpoint_url ) , return_tensors="""pt""" ).pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
    outputs = model(pixel_values=pixel_values , decoder_input_ids=decoder_input_ids )
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , __lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
    processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 370 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights ( fairseq_model , hf_model , is_finetuned ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
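# Minimal standalone sketch of the "*" wildcard substitution used above (hypothetical
# key): the layer index is parsed out of the fairseq name and spliced into the HF name.
#
#   name = "encoder.layers.3.fc1.weight"
#   layer_index = name.split("fc1")[0].split(".")[-2]   # -> "3"
#   mapped = "encoder.layers.*.feed_forward.intermediate_dense".replace("*", layer_index)
#   # mapped == "encoder.layers.3.feed_forward.intermediate_dense"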
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True ) -> List[Any]:
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
            # important: change the bos & pad token ids, since the CTC symbol is <pad>
            # and not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
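# The converter above routes each fairseq tensor through MAPPING, where "*"
# stands for the layer index: find the mapping key inside the tensor name,
# recover the index from the text just before that key, and substitute it.
# A minimal, self-contained sketch of that matching logic on toy key names
# (illustrative names, not a real checkpoint layout):
from typing import Optional


def demo_map_key(name: str, mapping: dict) -> Optional[str]:
    for key, mapped_key in mapping.items():
        if key in name:
            if "*" in mapped_key:
                # e.g. "encoder.layers.3.fc1.weight" -> layer index "3"
                layer_index = name.split(key)[0].split(".")[-2]
                return mapped_key.replace("*", layer_index)
            return mapped_key
    return None


assert (
    demo_map_key(
        "encoder.layers.3.fc1.weight",
        {"fc1": "encoder.layers.*.feed_forward.intermediate_dense"},
    )
    == "encoder.layers.3.feed_forward.intermediate_dense"
)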
| 3 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
snake_case = tempfile.mkdtemp()
# fmt: off
snake_case = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case = dict(zip(A_ , range(len(A_ ) ) ) )
snake_case = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
snake_case = {"""unk_token""": """<unk>"""}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A_ ) )
snake_case = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
snake_case = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(A_ , A_ )
def lowerCAmelCase ( self : int , **__snake_case : List[Any] )-> Union[str, Any]:
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **A_ )
def lowerCAmelCase ( self : Tuple , **__snake_case : Any )-> Tuple:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **A_ )
def lowerCAmelCase ( self : Optional[int] , **__snake_case : str )-> Union[str, Any]:
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def lowerCAmelCase ( self : Dict )-> List[str]:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : Any )-> List[Any]:
snake_case = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer()
snake_case = self.get_image_processor()
snake_case = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
snake_case = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case = self.get_image_processor(do_normalize=A_ )
snake_case = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
snake_case = self.prepare_image_inputs()
snake_case = image_processor(A_ , return_tensors="""np""" )
snake_case = processor(images=A_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCAmelCase ( self : int )-> str:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
snake_case = """lower newer"""
snake_case = processor(text=A_ , return_tensors="""np""" )
snake_case = tokenizer(A_ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase ( self : Union[str, Any] )-> int:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
snake_case = """lower newer"""
snake_case = self.prepare_image_inputs()
snake_case = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowerCAmelCase ( self : Any )-> Union[str, Any]:
snake_case = """google/owlvit-base-patch32"""
snake_case = OwlViTProcessor.from_pretrained(A_ )
snake_case = ["""cat""", """nasa badge"""]
snake_case = processor(text=A_ )
snake_case = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowerCAmelCase ( self : Union[str, Any] )-> Union[str, Any]:
snake_case = """google/owlvit-base-patch32"""
snake_case = OwlViTProcessor.from_pretrained(A_ )
snake_case = [["""cat""", """nasa badge"""], ["""person"""]]
snake_case = processor(text=A_ )
snake_case = 16
snake_case = len(A_ )
snake_case = max([len(A_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowerCAmelCase ( self : Any )-> Dict:
snake_case = """google/owlvit-base-patch32"""
snake_case = OwlViTProcessor.from_pretrained(A_ )
snake_case = ["""cat""", """nasa badge"""]
snake_case = processor(text=A_ )
snake_case = 16
snake_case = inputs["""input_ids"""]
snake_case = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase ( self : Union[str, Any] )-> Optional[Any]:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
snake_case = self.prepare_image_inputs()
snake_case = self.prepare_image_inputs()
snake_case = processor(images=A_ , query_images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowerCAmelCase ( self : List[str] )-> int:
snake_case = self.get_image_processor()
snake_case = self.get_tokenizer()
snake_case = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case = processor.batch_decode(A_ )
snake_case = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
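# End-to-end usage of the processor exercised in the tests above: one call
# tokenizes the text queries and preprocesses the image. A sketch only -- it
# assumes network access to the "google/owlvit-base-patch32" checkpoint (the
# same one the tests use) and an illustrative, reachable sample image URL:
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import OwlViTProcessor

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # illustrative URL
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']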
| 371 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
            # Create a dummy config file with image_processor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
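# The local-folder resolution the tests above rely on can be reproduced
# directly: a preprocessor_config.json naming the class is all
# AutoImageProcessor needs. Minimal sketch, no network required:
if __name__ == "__main__":
    import json
    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as tmpdir:
        (Path(tmpdir) / "preprocessor_config.json").write_text(
            json.dumps({"image_processor_type": "CLIPImageProcessor"})
        )
        processor = AutoImageProcessor.from_pretrained(tmpdir)
        print(type(processor).__name__)  # CLIPImageProcessor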
| 3 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "falcon"
snake_case_ = ["past_key_values"]
def __init__( self : Dict , __snake_case : Optional[int]=6_50_24 , __snake_case : Optional[int]=45_44 , __snake_case : List[Any]=32 , __snake_case : Tuple=71 , __snake_case : Dict=1e-5 , __snake_case : Optional[int]=0.02 , __snake_case : Optional[int]=True , __snake_case : Optional[Any]=0.0 , __snake_case : List[Any]=0.0 , __snake_case : Optional[Any]=None , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : Union[str, Any]=True , __snake_case : int=True , __snake_case : Optional[Any]=False , __snake_case : Union[str, Any]=11 , __snake_case : str=11 , **__snake_case : Optional[Any] , )-> List[Any]:
snake_case = vocab_size
# Backward compatibility with n_embed kwarg
snake_case = kwargs.pop("""n_embed""" , __snake_case )
snake_case = hidden_size if n_embed is None else n_embed
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = layer_norm_epsilon
snake_case = initializer_range
snake_case = use_cache
snake_case = hidden_dropout
snake_case = attention_dropout
snake_case = bos_token_id
snake_case = eos_token_id
snake_case = num_attention_heads if num_kv_heads is None else num_kv_heads
snake_case = alibi
snake_case = new_decoder_architecture
snake_case = multi_query # Ignored when new_decoder_architecture is True
snake_case = parallel_attn
snake_case = bias
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@property
def lowerCAmelCase ( self : Union[str, Any] )-> int:
return self.hidden_size // self.num_attention_heads
@property
def lowerCAmelCase ( self : int )-> List[str]:
return not self.alibi
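# The two properties above encode simple invariants: each attention head spans
# hidden_size // num_attention_heads dimensions, and rotary position
# embeddings are used exactly when alibi is off. Worked numbers mirroring the
# 7B-style defaults in the signature above (hidden_size=4544,
# num_attention_heads=71; alibi=False is assumed, matching the upstream
# default):
hidden_size, num_attention_heads, alibi = 4544, 71, False
assert hidden_size // num_attention_heads == 64  # head_dim
assert (not alibi) is True  # rotary embeddings enabled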
| 350 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "Salesforce/blip-image-captioning-base"
snake_case_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case_ = "image_captioner"
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ["image"]
snake_case_ = ["text"]
def __init__( self : Tuple , *__snake_case : Optional[int] , **__snake_case : Any )-> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : str , __snake_case : "Image" )-> int:
return self.pre_processor(images=__snake_case , return_tensors="""pt""" )
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> Union[str, Any]:
return self.model.generate(**__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any )-> Dict:
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0].strip()
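# The tool above is a thin encode -> generate -> decode pipeline. The same
# three steps written out by hand against the checkpoint named in the class.
# A sketch only: it assumes network access and a local image file ("photo.jpg"
# is a placeholder path):
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoModelForVision2Seq, AutoProcessor

    checkpoint = "Salesforce/blip-image-captioning-base"
    processor = AutoProcessor.from_pretrained(checkpoint)
    model = AutoModelForVision2Seq.from_pretrained(checkpoint)
    inputs = processor(images=Image.open("photo.jpg"), return_tensors="pt")  # encode
    generated_ids = model.generate(**inputs)  # forward
    print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())  # decode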
| 3 | 0 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
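# Worked example: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4 takes three
# multiplicative steps, while digit sums 39 -> 12 -> 3 take two additive steps.
assert multiplicative_persistence(39) == 3
assert additive_persistence(39) == 2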
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
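# When apply_ocr=True the processor runs Tesseract on the image and returns
# the recognized words plus their bounding boxes (normalized to a 0-1000
# scale) alongside the resized pixel values. Sketch only -- it assumes
# pytesseract is installed and uses a placeholder file name:
if __name__ == "__main__":
    image = Image.open("document_scan.png").convert("RGB")  # placeholder path
    image_processor = LayoutLMvaImageProcessor(apply_ocr=True)
    encoding = image_processor(image, return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
    print(list(zip(encoding.words[0][:3], encoding.boxes[0][:3])))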
| 3 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "BlipImageProcessor"
snake_case_ = "AutoTokenizer"
def __init__( self : Any , __snake_case : Dict , __snake_case : Tuple , __snake_case : Any )-> int:
super().__init__(__snake_case , __snake_case )
# add QFormer tokenizer
snake_case = qformer_tokenizer
def __call__( self : str , __snake_case : ImageInput = None , __snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __snake_case : bool = True , __snake_case : Union[bool, str, PaddingStrategy] = False , __snake_case : Union[bool, str, TruncationStrategy] = None , __snake_case : Optional[int] = None , __snake_case : int = 0 , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : Optional[Union[str, TensorType]] = None , **__snake_case : Any , )-> BatchFeature:
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
snake_case = BatchFeature()
if text is not None:
snake_case = self.tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
encoding.update(__snake_case )
snake_case = self.qformer_tokenizer(
text=__snake_case , add_special_tokens=__snake_case , padding=__snake_case , truncation=__snake_case , max_length=__snake_case , stride=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , return_overflowing_tokens=__snake_case , return_special_tokens_mask=__snake_case , return_offsets_mapping=__snake_case , return_token_type_ids=__snake_case , return_length=__snake_case , verbose=__snake_case , return_tensors=__snake_case , **__snake_case , )
snake_case = qformer_text_encoding.pop("""input_ids""" )
snake_case = qformer_text_encoding.pop("""attention_mask""" )
if images is not None:
snake_case = self.image_processor(__snake_case , return_tensors=__snake_case )
encoding.update(__snake_case )
return encoding
def lowerCAmelCase ( self : Any , *__snake_case : int , **__snake_case : Optional[int] )-> str:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : int , *__snake_case : int , **__snake_case : str )-> Union[str, Any]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase ( self : Union[str, Any] )-> List[str]:
snake_case = self.tokenizer.model_input_names
snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase ( self : Tuple , __snake_case : Dict , **__snake_case : Optional[Any] )-> Optional[int]:
if os.path.isfile(__snake_case ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__snake_case , exist_ok=__snake_case )
snake_case = os.path.join(__snake_case , """qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(__snake_case )
return super().save_pretrained(__snake_case , **__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[str] , __snake_case : str , **__snake_case : int )-> Tuple:
snake_case = AutoTokenizer.from_pretrained(__snake_case , subfolder="""qformer_tokenizer""" )
snake_case = cls._get_arguments_from_pretrained(__snake_case , **__snake_case )
args.append(__snake_case )
return cls(*__snake_case )
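# The __call__ above runs the same text through two tokenizers and re-keys
# the second (Q-Former) tokenizer's output under qformer_* names before
# merging. The merge step in miniature, with stand-in tokenizers (pure
# Python; the toy encoding scheme is illustrative, not a real tokenizer):
def _toy_tokenize(text, offset):
    return {"input_ids": [offset + ord(c) for c in text], "attention_mask": [1] * len(text)}


_encoding = _toy_tokenize("hi", 0)
_qformer = _toy_tokenize("hi", 100)
_encoding["qformer_input_ids"] = _qformer.pop("input_ids")
_encoding["qformer_attention_mask"] = _qformer.pop("attention_mask")
assert sorted(_encoding) == ["attention_mask", "input_ids", "qformer_attention_mask", "qformer_input_ids"]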
| 352 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, """html.parser""")
    div = soup.find("""div""", attrs={"""class""": """gs_ri"""})
    anchors = div.find("""div""", attrs={"""class""": """gs_fl"""}).find_all("""a""")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
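# The selector chain above (div.gs_ri -> div.gs_fl -> third <a>) can be
# verified against canned HTML without touching Google Scholar; the markup
# below is a hand-made stand-in for one result entry, not real Scholar output:
demo_html = '<div class="gs_ri"><div class="gs_fl"><a>Save</a><a>Cite</a><a>Cited by 42</a></div></div>'
demo_soup = BeautifulSoup(demo_html, "html.parser")
demo_anchors = demo_soup.find("div", attrs={"class": "gs_ri"}).find("div", attrs={"class": "gs_fl"}).find_all("a")
assert demo_anchors[2].get_text() == "Cited by 42"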
| 3 | 0 |
'''simple docstring'''
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
_SCREAMING_SNAKE_CASE = "src/diffusers"
# Matches is_xxx_available()
_SCREAMING_SNAKE_CASE = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_SCREAMING_SNAKE_CASE = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
_SCREAMING_SNAKE_CASE = "\n{0} = None\n"
_SCREAMING_SNAKE_CASE = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
_SCREAMING_SNAKE_CASE = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def __lowerCamelCase ( __lowerCAmelCase : Dict ) -> Dict:
snake_case = _re_backend.findall(__lowerCAmelCase )
if len(__lowerCAmelCase ) == 0:
return None
return "_and_".join(__lowerCAmelCase )
def __lowerCamelCase ( ) -> Optional[Any]:
with open(os.path.join(__lowerCAmelCase , """__init__.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case = f.readlines()
# Get to the point we do the actual imports for type checking
snake_case = 0
snake_case = {}
# Go through the end of the file
while line_index < len(__lowerCAmelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
snake_case = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith("""else:""" ):
line_index += 1
line_index += 1
snake_case = []
# Until we unindent, add backend objects to the list
while line_index < len(__lowerCAmelCase ) and len(lines[line_index] ) > 1:
snake_case = lines[line_index]
snake_case = _re_single_line_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(__lowerCAmelCase ) > 0:
snake_case = objects
else:
line_index += 1
return backend_specific_objects
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
if name.isupper():
return DUMMY_CONSTANT.format(__lowerCAmelCase )
elif name.islower():
return DUMMY_FUNCTION.format(__lowerCAmelCase , __lowerCAmelCase )
else:
return DUMMY_CLASS.format(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any]=None ) -> Tuple:
if backend_specific_objects is None:
snake_case = read_init()
    # Special correspondence from backend name to the module name as used in the requires_<module_name> functions
snake_case = {}
for backend, objects in backend_specific_objects.items():
snake_case = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""" ) ) + """]"""
snake_case = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(__lowerCAmelCase , __lowerCAmelCase ) for o in objects] )
snake_case = dummy_file
return dummy_files
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any]=False ) -> Union[str, Any]:
snake_case = create_dummy_files()
    # Special correspondence from backend name to the shortcut used in utils/dummy_xxx_objects.py
snake_case = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
snake_case = os.path.join(__lowerCAmelCase , """utils""" )
snake_case = {
backend: os.path.join(__lowerCAmelCase , F'''dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py''' )
for backend in dummy_files.keys()
}
snake_case = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case = f.read()
else:
snake_case = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py as the main '''
"""__init__ has new objects.""" )
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(__lowerCAmelCase , __lowerCAmelCase )}_objects.py. Run `make fix-copies` '''
"""to fix this.""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_dummies(args.fix_and_overwrite)
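# The casing dispatch above is plain string templating: an UPPERCASE name
# becomes a None constant, a lowercase name a stub function, anything else a
# DummyObject class. Abbreviated versions of the same three templates applied
# to toy names (the names below are illustrative only):
def demo_dummy(name, backends):
    if name.isupper():
        return "\n{0} = None\n".format(name)
    if name.islower():
        return "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n".format(name, backends)
    return "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n".format(name, backends)


for demo_name in ["SOME_CONSTANT", "some_function", "SomeClass"]:
    print(demo_dummy(demo_name, '["torch"]'))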
| 353 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "WhisperFeatureExtractor"
snake_case_ = "WhisperTokenizer"
def __init__( self : Dict , __snake_case : Any , __snake_case : int )-> List[Any]:
super().__init__(__snake_case , __snake_case )
snake_case = self.feature_extractor
snake_case = False
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str=None , __snake_case : List[str]=None , __snake_case : int=True )-> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=__snake_case , language=__snake_case , no_timestamps=__snake_case )
def __call__( self : str , *__snake_case : Tuple , **__snake_case : Union[str, Any] )-> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__snake_case , **__snake_case )
snake_case = kwargs.pop("""audio""" , __snake_case )
snake_case = kwargs.pop("""sampling_rate""" , __snake_case )
snake_case = kwargs.pop("""text""" , __snake_case )
if len(__snake_case ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
snake_case = self.feature_extractor(__snake_case , *__snake_case , sampling_rate=__snake_case , **__snake_case )
if text is not None:
snake_case = self.tokenizer(__snake_case , **__snake_case )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings["""input_ids"""]
return inputs
def lowerCAmelCase ( self : Union[str, Any] , *__snake_case : Union[str, Any] , **__snake_case : str )-> Optional[Any]:
return self.tokenizer.batch_decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , *__snake_case : Any , **__snake_case : Union[str, Any] )-> List[str]:
return self.tokenizer.decode(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : Any , __snake_case : str , __snake_case : Dict="np" )-> Any:
return self.tokenizer.get_prompt_ids(__snake_case , return_tensors=__snake_case )
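# The __call__ above routes `audio` to the feature extractor and `text` to
# the tokenizer; when both are given, the token ids are attached to the
# returned audio features (as labels in the upstream processor). Sketch
# against a public checkpoint -- an assumption, since no checkpoint is named
# in this file -- using one second of silence:
if __name__ == "__main__":
    import numpy as np
    from transformers import WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
    audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=audio, sampling_rate=16_000, text="hello world", return_tensors="pt")
    print(inputs.input_features.shape)  # (1, 80, 3000) log-mel features
    print(inputs.labels.shape)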
| 3 | 0 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> Optional[Any]:
snake_case = r"""\w+[.]\d+"""
snake_case = re.findall(__lowerCAmelCase , __lowerCAmelCase )
for pat in pats:
snake_case = key.replace(__lowerCAmelCase , """_""".join(pat.split(""".""" ) ) )
return key
def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> List[str]:
snake_case = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
snake_case = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
snake_case = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
snake_case = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
snake_case = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any]=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
snake_case = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
snake_case = flax_model.init_weights(PRNGKey(__lowerCAmelCase ) )
snake_case = flatten_dict(__lowerCAmelCase )
snake_case = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case = rename_key(__lowerCAmelCase )
snake_case = tuple(renamed_pt_key.split(""".""" ) )
# Correctly rename weight parameters
snake_case , snake_case = rename_key_and_reshape_tensor(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
snake_case = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
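# rename_key above rewrites every "name.N" segment into Flax's "name_N" form
# via the \w+[.]\d+ pattern; segments without a trailing digit are left alone.
# The transformation in isolation, on an illustrative diffusers-style key:
import re


def demo_rename(key: str) -> str:
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


assert demo_rename("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"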
| 354 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_SCREAMING_SNAKE_CASE = "\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n"
_SCREAMING_SNAKE_CASE = "\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results['pearsonr'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric(\"pearsonr\")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n ['p-value', 'pearsonr']\n >>> print(round(results['pearsonr'], 2))\n -0.74\n >>> print(round(results['p-value'], 2))\n 0.15\n"
_SCREAMING_SNAKE_CASE = "\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """simple docstring"""

    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 355 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int) -> list:
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx: int, cnt: int) -> list:
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx: int) -> list:
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final() -> list:
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
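
# Example invocation (illustrative; the script name and checkpoint path are hypothetical):
#   python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name ./CvT-13-384x384-IN-1k.pth --pytorch_dump_folder_path ./cvt-13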
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
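
# Note (assumption: standard transformers lazy-module behavior): registering the
# _LazyModule in sys.modules defers the heavy torch-backed imports above until an
# attribute such as ViTMSNModel is first accessed.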
| 356 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file: str) -> collections.OrderedDict:
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """simple docstring"""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
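
# Illustrative greedy longest-match-first behavior (hypothetical two-entry vocab):
#   WordpieceTokenizer(vocab={"ab": 0, "c": 1}).tokenize("abc")  # -> ["ab", "c"]
#   WordpieceTokenizer(vocab={"ab": 0}).tokenize("abx")          # -> ["ab", "<unk>"]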
class CpmAntTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs)
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
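
# Illustrative (hypothetical ids): with bos_token_id == 6,
#   build_inputs_with_special_tokens([5, 7])      # -> [6, 5, 7]
#   build_inputs_with_special_tokens([5], [7])    # -> [6, 5, 6, 7]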
| 3 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    """simple docstring"""

    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
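
# Usage sketch (assumes the standard datasets task-template API):
#   task = Summarization(text_column="article", summary_column="highlights")
#   task.column_mapping  # -> {"article": "text", "highlights": "summary"}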
| 357 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])

def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier

def main() -> None:
    data = load_iris()
    features, targets = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = data["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 0 |
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
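# Worked example: solution(8) == 21, i.e. sum(phi(d) for d in 2..8)
# = 1 + 2 + 2 + 4 + 2 + 6 + 4 — the count of reduced proper fractions n/d
# with denominator d <= 8 (cf. Project Euler 72).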
if __name__ == "__main__":
print(F"""{solution() = }""")
| 358 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup

def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(F"""{key}\n{value}\n""")
| 3 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
_SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 3 | 0 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 360 |
'''simple docstring'''
class Node:
    """simple docstring"""

    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    """simple docstring"""

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    """simple docstring"""

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None
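
# Illustrative usage:
#   linked_list = LinkedList()
#   linked_list.insert(1); linked_list.insert(2); linked_list.insert(3)
#   str(linked_list)   # "1 2 3"
#   2 in linked_list   # True
#   linked_list.delete_value(2)
#   str(linked_list)   # "1 3"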
def create_linked_list() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """simple docstring"""

    pass

class Node:
    """simple docstring"""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 361 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs)
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 3 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    """simple docstring"""

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    """simple docstring"""

    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    """simple docstring"""

    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subsplit(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
| 362 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    """simple docstring"""

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def lowerCAmelCase ( cls : Optional[int] )-> Dict:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : List[Any] )-> str:
try:
delete_repo(token=cls._token , repo_id="""test-image-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : Optional[Any] )-> Union[str, Any]:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""test-image-processor""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = ViTImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
__snake_case , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(__snake_case , getattr(__snake_case , __snake_case ) )
def lowerCAmelCase ( self : str )-> Tuple:
CustomImageProcessor.register_for_auto_class()
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , )
snake_case = AutoImageProcessor.from_pretrained(
f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
| 3 | 0 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
"169M": 12,
"430M": 24,
"1B5": 24,
"3B": 32,
"7B": 32,
"14B": 40,
}
HIDDEN_SIZE_MAPPING = {
"169M": 768,
"430M": 1024,
"1B5": 2048,
"3B": 2560,
"7B": 4096,
"14B": 5120,
}
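# Renames the keys of a raw RWKV checkpoint in place so they match the Hugging Face naming scheme.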
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
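# End-to-end conversion: builds the tokenizer and config, downloads and converts the checkpoint,
# saves it in shards, and optionally pushes the result to the Hub.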
def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDDEN_SIZE_MAPPING[size]
    )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model."
    )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 363 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def lowerCAmelCase ( self : str )-> Any:
snake_case = 0
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig()
snake_case = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , __snake_case ) )
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Dict )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaFeatureExtractor()
snake_case = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
snake_case = WavaVecaProcessor(__snake_case , __snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case , __snake_case ) , """r""" ) as f:
snake_case = json.load(__snake_case )
config_dict.pop("""processor_class""" )
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write(json.dumps(__snake_case ) )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Optional[int] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case , os.path.join(__snake_case , """vocab.json""" ) )
            # create empty sample processor
with open(os.path.join(__snake_case , __snake_case ) , """w""" ) as f:
f.write("""{}""" )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 0 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
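# Basic transformer block: self-attention, optional cross-attention, and a feed-forward layer,
# each preceded by a (possibly adaptive) layer norm.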
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
"""simple docstring"""
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : Union[str, Any]=0.0 , __snake_case : Optional[int] = None , __snake_case : str = "geglu" , __snake_case : Optional[int] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = False , __snake_case : bool = True , __snake_case : str = "layer_norm" , __snake_case : bool = False , )-> Dict:
super().__init__()
snake_case = only_cross_attention
snake_case = (num_embeds_ada_norm is not None) and norm_type == """ada_norm_zero"""
snake_case = (num_embeds_ada_norm is not None) and norm_type == """ada_norm"""
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
snake_case = AdaLayerNorm(__snake_case , __snake_case )
elif self.use_ada_layer_norm_zero:
snake_case = AdaLayerNormZero(__snake_case , __snake_case )
else:
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
snake_case = Attention(
query_dim=__snake_case , heads=__snake_case , dim_head=__snake_case , dropout=__snake_case , bias=__snake_case , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__snake_case , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
snake_case = (
AdaLayerNorm(__snake_case , __snake_case )
if self.use_ada_layer_norm
else nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
)
snake_case = Attention(
query_dim=__snake_case , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__snake_case , dim_head=__snake_case , dropout=__snake_case , bias=__snake_case , upcast_attention=__snake_case , ) # is self-attn if encoder_hidden_states is none
else:
snake_case = None
snake_case = None
# 3. Feed-forward
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
snake_case = FeedForward(__snake_case , dropout=__snake_case , activation_fn=__snake_case , final_dropout=__snake_case )
# let chunk size default to None
snake_case = None
snake_case = 0
def lowerCAmelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : int )-> List[str]:
# Sets chunk feed-forward
snake_case = chunk_size
snake_case = dim
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : torch.FloatTensor , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.FloatTensor] = None , __snake_case : Optional[torch.LongTensor] = None , __snake_case : Dict[str, Any] = None , __snake_case : Optional[torch.LongTensor] = None , )-> List[str]:
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
snake_case = self.norma(__snake_case , __snake_case )
elif self.use_ada_layer_norm_zero:
snake_case , snake_case , snake_case , snake_case , snake_case = self.norma(
__snake_case , __snake_case , __snake_case , hidden_dtype=hidden_states.dtype )
else:
snake_case = self.norma(__snake_case )
snake_case = cross_attention_kwargs if cross_attention_kwargs is not None else {}
snake_case = self.attna(
__snake_case , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__snake_case , **__snake_case , )
if self.use_ada_layer_norm_zero:
snake_case = gate_msa.unsqueeze(1 ) * attn_output
snake_case = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
snake_case = (
self.norma(__snake_case , __snake_case ) if self.use_ada_layer_norm else self.norma(__snake_case )
)
snake_case = self.attna(
__snake_case , encoder_hidden_states=__snake_case , attention_mask=__snake_case , **__snake_case , )
snake_case = attn_output + hidden_states
# 3. Feed-forward
snake_case = self.norma(__snake_case )
if self.use_ada_layer_norm_zero:
snake_case = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
snake_case = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
snake_case = torch.cat(
[self.ff(__snake_case ) for hid_slice in norm_hidden_states.chunk(__snake_case , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
snake_case = self.ff(__snake_case )
if self.use_ada_layer_norm_zero:
snake_case = gate_mlp.unsqueeze(1 ) * ff_output
snake_case = ff_output + hidden_states
return hidden_states
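# Position-wise feed-forward network with a configurable activation (GELU / GEGLU variants)
# and an optional final dropout.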
class FeedForward(nn.Module):
"""simple docstring"""
def __init__( self : int , __snake_case : int , __snake_case : Optional[int] = None , __snake_case : int = 4 , __snake_case : float = 0.0 , __snake_case : str = "geglu" , __snake_case : bool = False , )-> Tuple:
super().__init__()
snake_case = int(dim * mult )
snake_case = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
snake_case = GELU(__snake_case , __snake_case )
if activation_fn == "gelu-approximate":
snake_case = GELU(__snake_case , __snake_case , approximate="""tanh""" )
elif activation_fn == "geglu":
snake_case = GEGLU(__snake_case , __snake_case )
elif activation_fn == "geglu-approximate":
snake_case = ApproximateGELU(__snake_case , __snake_case )
snake_case = nn.ModuleList([] )
# project in
self.net.append(__snake_case )
# project dropout
self.net.append(nn.Dropout(__snake_case ) )
# project out
self.net.append(nn.Linear(__snake_case , __snake_case ) )
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
if final_dropout:
self.net.append(nn.Dropout(__snake_case ) )
def lowerCAmelCase ( self : List[str] , __snake_case : Any )-> Optional[int]:
for module in self.net:
snake_case = module(__snake_case )
return hidden_states
class GELU(nn.Module):
"""simple docstring"""
def __init__( self : List[Any] , __snake_case : int , __snake_case : int , __snake_case : str = "none" )-> Optional[Any]:
super().__init__()
snake_case = nn.Linear(__snake_case , __snake_case )
snake_case = approximate
def lowerCAmelCase ( self : Optional[Any] , __snake_case : Union[str, Any] )-> str:
if gate.device.type != "mps":
return F.gelu(__snake_case , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def lowerCAmelCase ( self : List[str] , __snake_case : List[Any] )-> Optional[Any]:
snake_case = self.proj(__snake_case )
snake_case = self.gelu(__snake_case )
return hidden_states
class GEGLU(nn.Module):
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : int )-> Union[str, Any]:
super().__init__()
snake_case = nn.Linear(__snake_case , dim_out * 2 )
def lowerCAmelCase ( self : Optional[int] , __snake_case : Optional[Any] )-> int:
if gate.device.type != "mps":
return F.gelu(__snake_case )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def lowerCAmelCase ( self : Any , __snake_case : Union[str, Any] )-> Tuple:
snake_case , snake_case = self.proj(__snake_case ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(__snake_case )
class ApproximateGELU(nn.Module):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : int , __snake_case : int )-> List[str]:
super().__init__()
snake_case = nn.Linear(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] , __snake_case : Optional[Any] )-> Any:
snake_case = self.proj(__snake_case )
return x * torch.sigmoid(1.7_02 * x )
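# Adaptive layer norm: the per-sample scale and shift are predicted from a learned timestep
# embedding instead of being fixed parameters.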
class AdaLayerNorm(nn.Module):
"""simple docstring"""
def __init__( self : int , __snake_case : int , __snake_case : int )-> str:
super().__init__()
snake_case = nn.Embedding(__snake_case , __snake_case )
snake_case = nn.SiLU()
snake_case = nn.Linear(__snake_case , embedding_dim * 2 )
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Optional[Any] , __snake_case : List[str] )-> Tuple:
snake_case = self.linear(self.silu(self.emb(__snake_case ) ) )
snake_case , snake_case = torch.chunk(__snake_case , 2 )
snake_case = self.norm(__snake_case ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : str , __snake_case : Union[str, Any] )-> Any:
super().__init__()
snake_case = CombinedTimestepLabelEmbeddings(__snake_case , __snake_case )
snake_case = nn.SiLU()
snake_case = nn.Linear(__snake_case , 6 * embedding_dim , bias=__snake_case )
snake_case = nn.LayerNorm(__snake_case , elementwise_affine=__snake_case , eps=1e-6 )
def lowerCAmelCase ( self : str , __snake_case : Dict , __snake_case : int , __snake_case : int , __snake_case : Union[str, Any]=None )-> Optional[int]:
snake_case = self.linear(self.silu(self.emb(__snake_case , __snake_case , hidden_dtype=__snake_case ) ) )
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = emb.chunk(6 , dim=1 )
snake_case = self.norm(__snake_case ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
"""simple docstring"""
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : Optional[str] = None , __snake_case : float = 1e-5 )-> List[Any]:
super().__init__()
snake_case = num_groups
snake_case = eps
if act_fn is None:
snake_case = None
else:
snake_case = get_activation(__snake_case )
snake_case = nn.Linear(__snake_case , out_dim * 2 )
def lowerCAmelCase ( self : Dict , __snake_case : Optional[Any] , __snake_case : Tuple )-> Optional[Any]:
if self.act:
snake_case = self.act(__snake_case )
snake_case = self.linear(__snake_case )
snake_case = emb[:, :, None, None]
snake_case , snake_case = emb.chunk(2 , dim=1 )
snake_case = F.group_norm(__snake_case , self.num_groups , eps=self.eps )
snake_case = x * (1 + scale) + shift
return x
| 364 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
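# Computes all bridges of an undirected graph with Tarjan's low-link DFS: low[v] is the smallest
# discovery id reachable from v's subtree using at most one back edge, and a tree edge (at, to)
# is a bridge exactly when to's subtree cannot reach at or anything discovered before it.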
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
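# Hand-traced example on the first demo graph above: the triangle 0-1-2 and the 4-cycle 5-6-7-8
# contain no bridges, so the call returns [(3, 4), (2, 3), (2, 5)].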
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
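# Recursive depth-first topological sort: a vertex is appended only after all of its neighbors
# have been emitted, so the start vertex ends up last in the returned list.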
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
| 365 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
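# Follows the dotted attribute path in `key` down the Hugging Face model and copies `value`
# into the matching parameter, checking shapes first.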
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
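# Builds a SEWConfig from the fairseq model configuration, translating the conv feature extractor
# spec and the transformer hyper-parameters.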
def convert_config(model, is_finetuned):
snake_case = SEWConfig()
if is_finetuned:
snake_case = model.wav_encoder.wav_model.cfg
else:
snake_case = model.cfg
snake_case = fs_config.conv_bias
snake_case = eval(fs_config.conv_feature_layers )
snake_case = [x[0] for x in conv_layers]
snake_case = [x[1] for x in conv_layers]
snake_case = [x[2] for x in conv_layers]
snake_case = """gelu"""
snake_case = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
snake_case = 0.0
snake_case = fs_config.activation_fn.name
snake_case = fs_config.encoder_embed_dim
snake_case = 0.02
snake_case = fs_config.encoder_ffn_embed_dim
snake_case = 1e-5
snake_case = fs_config.encoder_layerdrop
snake_case = fs_config.encoder_attention_heads
snake_case = fs_config.conv_pos_groups
snake_case = fs_config.conv_pos
snake_case = len(__lowerCAmelCase )
snake_case = fs_config.encoder_layers
snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case = model.cfg
snake_case = fs_config.final_dropout
snake_case = fs_config.layerdrop
snake_case = fs_config.activation_dropout
snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case = fs_config.attention_dropout
snake_case = fs_config.dropout_input
snake_case = fs_config.dropout
snake_case = fs_config.mask_channel_length
snake_case = fs_config.mask_channel_prob
snake_case = fs_config.mask_length
snake_case = fs_config.mask_prob
snake_case = """Wav2Vec2FeatureExtractor"""
snake_case = """Wav2Vec2CTCTokenizer"""
return config
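# Top-level conversion: loads the fairseq checkpoint, derives or loads the config, optionally
# builds a CTC tokenizer and processor for fine-tuned models, and saves everything in Hugging Face format.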
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = convert_config(model[0] , __lowerCAmelCase )
snake_case = model[0].eval()
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = SEWForCTC(__lowerCAmelCase )
else:
snake_case = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 3 | 0 |
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING = None
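# Formatter that materializes Arrow-backed dataset rows, columns, and batches as jax arrays
# placed on a chosen device.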
class _lowerCAmelCase ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : Dict=None , __snake_case : Any=None , **__snake_case : Any )-> List[Any]:
super().__init__(features=__snake_case )
import jax
from jaxlib.xla_client import Device
if isinstance(__snake_case , __snake_case ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__snake_case )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
snake_case = device if isinstance(__snake_case , __snake_case ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or `dill`,
        # so we keep the device mapping in a module-level global instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case = str(jax.devices()[0] )
snake_case = jnp_array_kwargs
@staticmethod
def lowerCAmelCase ( )-> Dict[str, "jaxlib.xla_extension.Device"]:
import jax
return {str(__snake_case ): device for device in jax.devices()}
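    # Stacks a column of homogeneous jax arrays (same shape and dtype) into a single batched
    # array; heterogeneous columns are returned unchanged as Python lists.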
def lowerCAmelCase ( self : Dict , __snake_case : str )-> Optional[int]:
import jax
import jax.numpy as jnp
if isinstance(__snake_case , __snake_case ) and column:
if all(
isinstance(__snake_case , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__snake_case , axis=0 )
return column
def lowerCAmelCase ( self : Optional[int] , __snake_case : Optional[Any] )-> Union[str, Any]:
import jax
import jax.numpy as jnp
if isinstance(__snake_case , (str, bytes, type(__snake_case )) ):
return value
elif isinstance(__snake_case , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case = {}
if isinstance(__snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
snake_case = {"""dtype""": jnp.intaa}
else:
snake_case = {"""dtype""": jnp.intaa}
elif isinstance(__snake_case , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__snake_case , PIL.Image.Image ):
snake_case = np.asarray(__snake_case )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or `dill`,
        # so we keep the device mapping in a module-level global instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__snake_case , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCAmelCase ( self : List[Any] , __snake_case : Tuple )-> List[Any]:
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__snake_case , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__snake_case , """__array__""" ) and not isinstance(__snake_case , jax.Array ):
snake_case = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__snake_case , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__snake_case ) for substruct in data_struct] )
elif isinstance(__snake_case , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__snake_case ) for substruct in data_struct] )
return self._tensorize(__snake_case )
def lowerCAmelCase ( self : Optional[int] , __snake_case : dict )-> str:
return map_nested(self._recursive_tensorize , __snake_case , map_list=__snake_case )
def lowerCAmelCase ( self : List[str] , __snake_case : pa.Table )-> Mapping:
snake_case = self.numpy_arrow_extractor().extract_row(__snake_case )
snake_case = self.python_features_decoder.decode_row(__snake_case )
return self.recursive_tensorize(__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : pa.Table )-> "jax.Array":
snake_case = self.numpy_arrow_extractor().extract_column(__snake_case )
snake_case = self.python_features_decoder.decode_column(__snake_case , pa_table.column_names[0] )
snake_case = self.recursive_tensorize(__snake_case )
snake_case = self._consolidate(__snake_case )
return column
def lowerCAmelCase ( self : Any , __snake_case : pa.Table )-> Mapping:
snake_case = self.numpy_arrow_extractor().extract_batch(__snake_case )
snake_case = self.python_features_decoder.decode_batch(__snake_case )
snake_case = self.recursive_tensorize(__snake_case )
for column_name in batch:
snake_case = self._consolidate(batch[column_name] )
return batch
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
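# Fast CPU checks for the Kandinsky 2.2 controlnet image-to-image pipeline, built from tiny
# dummy UNet and VQ models.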
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaControlnetImgaImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 3 | 0 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = DistilBertTokenizer
snake_case_ = DistilBertTokenizerFast
snake_case_ = True
@slow
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
snake_case = tokenizer.encode("""sequence builders""" , add_special_tokens=__snake_case )
snake_case = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__snake_case )
snake_case = tokenizer.build_inputs_with_special_tokens(__snake_case )
snake_case = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 367 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    """
    Interpolate and evaluate a polynomial at ``xa`` with Neville's method,
    returning the approximated value and the full table of intermediate computations.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
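    # Hedged sanity check, not part of the original file: the points below all lie on
    # the line y = x + 5, so interpolating at x = 5 should return 10.0.
    print(neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0])  # expected: 10.0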
| 3 | 0 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
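# Note on the layout above: every optional backend is wrapped in the same guard.
# A generic, illustrative sketch of that pattern ("some_backend" is a placeholder,
# not a real diffusers module):
#
#     try:
#         if not is_some_backend_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .utils.dummy_some_backend_objects import *  # noqa F403  (stubs that raise on use)
#     else:
#         from .some_module import SomeObject  # real import, only if the backend is installed
#
# As a result the package imports cleanly without optional extras; the guarded objects
# only raise when they are actually used.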
| 368 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# if all neighbors visited add current to sort
sort.append(__lowerCAmelCase )
# if all vertices haven't been visited select a new one to visit
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
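    # With the hard-coded graph above, the traversal starting from "a" prints
    # ['c', 'd', 'e', 'b', 'a']: every vertex is appended only after its children.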
| 3 | 0 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> List[Any]:
snake_case = 1.5
snake_case = int(factor * num_class_images )
snake_case = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCAmelCase , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=__lowerCAmelCase )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
snake_case = client.query(text=__lowerCAmelCase )
if len(__lowerCAmelCase ) >= factor * num_class_images or num_images > 1e4:
break
else:
snake_case = int(factor * num_images )
snake_case = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=__lowerCAmelCase , aesthetic_weight=0.1 , )
snake_case = 0
snake_case = 0
snake_case = tqdm(desc="""downloading real regularization images""" , total=__lowerCAmelCase )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
snake_case = class_images[count]
count += 1
try:
snake_case = requests.get(images["""url"""] )
if img.status_code == 2_00:
snake_case = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __lowerCamelCase ( ) -> List[str]:
snake_case = argparse.ArgumentParser("""""" , add_help=__lowerCAmelCase )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=__lowerCAmelCase , type=__lowerCAmelCase )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=__lowerCAmelCase , type=__lowerCAmelCase )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=2_00 , type=__lowerCAmelCase )
return parser.parse_args()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
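    # Illustrative invocation (the script name is assumed, not from the original file):
    #   python retrieve.py --class_prompt "a photo of a dog" \
    #       --class_data_dir ./class_data --num_class_images 200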
| 369 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple:
snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
# program, and it breaks other tests that run from the same pytest worker; therefore, until
# this is sorted out, it must be run only in an external program - that is, distributed=True
# in this test - and only under one or more gpus; if we want cpu we will need a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
# a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test a 2nd time - was getting eval_loss: nan
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding`, which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes, and the diff in optim memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should differ by about that same margin.
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
snake_case = 1_20
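        # Illustrative arithmetic behind the 120 threshold, using the figures above:
        # 25e6 quantized params * (8 - 2) bytes = 150e6 bytes ~= 143 MiB, so demanding
        # a 120MB saving leaves roughly 20MB of slack for gpu-to-gpu variation.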
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict:
snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case = self.get_auto_remove_tmp_dir()
snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ["""run_translation.py"""] + args
with patch.object(__snake_case , """argv""" , __snake_case ):
main()
return output_dir
| 3 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] )-> str:
snake_case = """ylacombe/bark-small"""
snake_case = tempfile.mkdtemp()
snake_case = """en_speaker_1"""
snake_case = """This is a test string"""
snake_case = """speaker_embeddings_path.json"""
snake_case = """speaker_embeddings"""
def lowerCAmelCase ( self : Union[str, Any] , **__snake_case : Optional[Any] )-> Tuple:
return AutoTokenizer.from_pretrained(self.checkpoint , **__snake_case )
def lowerCAmelCase ( self : Union[str, Any] )-> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = self.get_tokenizer()
snake_case = BarkProcessor(tokenizer=__snake_case )
processor.save_pretrained(self.tmpdirname )
snake_case = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowerCAmelCase ( self : List[str] )-> int:
snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
snake_case = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowerCAmelCase ( self : int )-> int:
snake_case = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
snake_case = 35
snake_case = 2
snake_case = 8
snake_case = {
"""semantic_prompt""": np.ones(__snake_case ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
snake_case = processor(text=self.input_string , voice_preset=__snake_case )
snake_case = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__snake_case , np.array([] ) ).tolist() )
# test loading voice preset from npz file
snake_case = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(__snake_case , **__snake_case )
snake_case = processor(text=self.input_string , voice_preset=__snake_case )
snake_case = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__snake_case , np.array([] ) ).tolist() )
# test loading voice preset from the hub
snake_case = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowerCAmelCase ( self : int )-> Any:
snake_case = self.get_tokenizer()
snake_case = BarkProcessor(tokenizer=__snake_case )
snake_case = processor(text=self.input_string )
snake_case = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=__snake_case , return_attention_mask=__snake_case , return_token_type_ids=__snake_case , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 370 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
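# Illustrative example of how the wildcard entries above are used (matching the logic
# in recursively_load_weights below): a fairseq key such as
#   "encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" entry, the layer index "3" is substituted for "*",
# and the final key becomes "hubert.encoder.layers.3.attention.k_proj.weight"
# (the "hubert." prefix is only added for fine-tuned checkpoints).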
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> int:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> str:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True ) -> List[Any]:
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important: change the bos & pad token ids, since the CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "gpt_neo"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self : Optional[int] , __snake_case : Dict=5_02_57 , __snake_case : Tuple=20_48 , __snake_case : Any=20_48 , __snake_case : Optional[Any]=24 , __snake_case : Optional[Any]=[[["global", "local"], 12]] , __snake_case : Optional[Any]=16 , __snake_case : str=None , __snake_case : List[str]=2_56 , __snake_case : Optional[Any]="gelu_new" , __snake_case : Optional[int]=0.0 , __snake_case : str=0.0 , __snake_case : List[Any]=0.0 , __snake_case : str=0.1 , __snake_case : Union[str, Any]=1e-5 , __snake_case : Optional[int]=0.02 , __snake_case : str=True , __snake_case : Tuple=5_02_56 , __snake_case : Dict=5_02_56 , **__snake_case : Tuple , )-> List[str]:
snake_case = vocab_size
snake_case = max_position_embeddings
snake_case = hidden_size
snake_case = num_layers
snake_case = num_heads
snake_case = intermediate_size
snake_case = window_size
snake_case = activation_function
snake_case = resid_dropout
snake_case = embed_dropout
snake_case = attention_dropout
snake_case = classifier_dropout
snake_case = layer_norm_epsilon
snake_case = initializer_range
snake_case = use_cache
snake_case = bos_token_id
snake_case = eos_token_id
snake_case = attention_types
snake_case = self.expand_attention_types_params(__snake_case )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Any )-> Optional[int]:
snake_case = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
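    # Illustrative expansion: attention_types=[[["global", "local"], 12]] produces a
    # 24-entry list alternating "global" and "local", i.e. one attention type per layer.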
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : int ) -> Tuple:
import torch
snake_case = input.size()
snake_case = len(__lowerCAmelCase )
snake_case = shape[dimension]
snake_case = torch.arange(0 , __lowerCAmelCase , __lowerCAmelCase )
snake_case = torch.div(sizedim - size , __lowerCAmelCase , rounding_mode="""floor""" ) + 1
snake_case = torch.arange(__lowerCAmelCase ) + low_indices[:min_length][:, None]
snake_case = [slice(__lowerCAmelCase )] * rank
snake_case = indices
snake_case = input[s]
snake_case = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__lowerCAmelCase )
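# Intended behaviour of the helper above, which mirrors torch.Tensor.unfold
# (the example values are assumed): an input of shape (2, 8) with dimension=1,
# size=4 and step=4 is cut into two non-overlapping windows of length 4,
# giving an output of shape (2, 2, 4).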
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : str ) -> List[Any]:
import torch
snake_case = torch.arange(1 , __lowerCAmelCase )
snake_case = torch.remainder(__lowerCAmelCase , __lowerCAmelCase )
snake_case = remainders == 0
snake_case = candidates[divisor_indices]
snake_case = torch.max(__lowerCAmelCase )
return largest_divisor, torch.div(__lowerCAmelCase , __lowerCAmelCase , rounding_mode="""floor""" )
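# Illustrative example for the helper above (parameter names assumed): for a sequence
# length of 10 and a window size of 7, the candidate divisors are 1..6, the divisors
# of 10 among them are {1, 2, 5}, and the function returns (5, 2) - the largest
# divisor and the resulting number of blocks.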
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Dict )-> Mapping[str, Mapping[int, str]]:
snake_case = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(__snake_case , direction="""inputs""" )
snake_case = {0: """batch""", 1: """past_sequence + sequence"""}
else:
snake_case = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def lowerCAmelCase ( self : Tuple )-> int:
return self._config.num_heads
def lowerCAmelCase ( self : Any , __snake_case : PreTrainedTokenizer , __snake_case : int = -1 , __snake_case : int = -1 , __snake_case : bool = False , __snake_case : Optional[TensorType] = None , )-> Mapping[str, Any]:
snake_case = super(__snake_case , self ).generate_dummy_inputs(
__snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case )
# We need to order the input in the way they appears in the forward()
snake_case = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
snake_case , snake_case = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
snake_case = seqlen + 2
snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
snake_case = [
(torch.zeros(__snake_case ), torch.zeros(__snake_case )) for _ in range(self.num_layers )
]
snake_case = common_inputs["""attention_mask"""]
if self.use_past:
snake_case = ordered_inputs["""attention_mask"""].dtype
snake_case = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__snake_case , __snake_case , dtype=__snake_case )] , dim=1 )
return ordered_inputs
@property
def lowerCAmelCase ( self : Dict )-> int:
return 13
| 371 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
# Create a dummy config file with image_processor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_SCREAMING_SNAKE_CASE = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
_SCREAMING_SNAKE_CASE = {
"gpt-neox-20b": 2048,
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __snake_case : Dict=None , __snake_case : Any=None , __snake_case : Union[str, Any]=None , __snake_case : Optional[Any]="<|endoftext|>" , __snake_case : Tuple="<|endoftext|>" , __snake_case : Tuple="<|endoftext|>" , __snake_case : Union[str, Any]=False , **__snake_case : List[str] , )-> Optional[Any]:
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __snake_case ) != add_prefix_space:
snake_case = getattr(__snake_case , pre_tok_state.pop("""type""" ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**__snake_case )
snake_case = add_prefix_space
def lowerCAmelCase ( self : Tuple , __snake_case : str , __snake_case : Optional[str] = None )-> Tuple[str]:
snake_case = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : "Conversation" )-> List[int]:
snake_case = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__snake_case , add_special_tokens=__snake_case ) + [self.eos_token_id] )
if len(__snake_case ) > self.model_max_length:
snake_case = input_ids[-self.model_max_length :]
return input_ids
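# Hedged usage sketch (added, not part of the file above; assumes network access
# to the Hub). Upstream this class is named GPTNeoXTokenizerFast, so exercising
# it would look like:
# from transformers import GPTNeoXTokenizerFast
# tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
# print(tok("hello world").input_ids)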
| 350 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
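# Hedged usage sketch (added; assumes torch, Pillow, and the BLIP checkpoint are
# available, and "photo.jpg" is a placeholder path):
# tool = ImageCaptioningTool()
# print(tool(Image.open("photo.jpg")))  # -> an English caption string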
| 3 | 0 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F"""{solution() = }""")
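# Worked check (added for illustration): R(6) = 111111 = 7 * 15873, so the
# shortest repunit divisible by 7 has length 6.
assert least_divisible_repunit(7) == 6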
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 3 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Any )-> List[Any]:
torch.manual_seed(0 )
snake_case = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
torch.manual_seed(0 )
snake_case = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def lowerCAmelCase ( self : Tuple )-> str:
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__snake_case )
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
snake_case = self.dummy_uncond_unet
snake_case = DDIMScheduler()
snake_case = self.dummy_vq_model
snake_case = LDMPipeline(unet=__snake_case , vqvae=__snake_case , scheduler=__snake_case )
ldm.to(__snake_case )
ldm.set_progress_bar_config(disable=__snake_case )
snake_case = torch.manual_seed(0 )
snake_case = ldm(generator=__snake_case , num_inference_steps=2 , output_type="""numpy""" ).images
snake_case = torch.manual_seed(0 )
snake_case = ldm(generator=__snake_case , num_inference_steps=2 , output_type="""numpy""" , return_dict=__snake_case )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
snake_case = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] )-> str:
snake_case = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__snake_case )
ldm.set_progress_bar_config(disable=__snake_case )
snake_case = torch.manual_seed(0 )
snake_case = ldm(generator=__snake_case , num_inference_steps=5 , output_type="""numpy""" ).images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
snake_case = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
snake_case = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 352 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
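# Note (added): this scrapes Google Scholar's current markup; the `gs_ri` and
# `gs_fl` class names and the anchor index are layout details that can break
# silently. A defensive variant would check that `soup.find(...)` returned a
# tag before dereferencing it.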
| 3 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
snake_case = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
            ], # cumulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
            ], # cumulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
snake_case = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
snake_case = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
snake_case = tf_top_k_top_p_filtering(__snake_case , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
snake_case = output[output != -float("""inf""" )]
snake_case = tf.cast(
tf.where(tf.not_equal(__snake_case , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__snake_case , __snake_case , rtol=1e-12 )
tf.debugging.assert_equal(__snake_case , __snake_case )
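# Context note (added for clarity): `tf_top_k_top_p_filtering` applies top-k and
# nucleus (top-p) truncation to the logits, masking filtered entries to -inf;
# `min_tokens_to_keep=4` guarantees at least four candidates survive per row,
# which is why exactly the annotated "highest value" indices remain above.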
@require_tf
class _lowerCAmelCase ( unittest.TestCase , A__ ):
"""simple docstring"""
if is_tf_available():
snake_case_ = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def lowerCAmelCase ( self : str )-> Optional[Any]:
# TF-only test: tf.saved_model export
snake_case = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case = 2
snake_case = 2
class DummyModel(tf.Module):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : Any )-> List[str]:
super(__snake_case , self ).__init__()
snake_case = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__snake_case , )
            def serving(self, input_ids, attention_mask):
snake_case = self.model.generate(
input_ids=__snake_case , attention_mask=__snake_case , max_new_tokens=__snake_case , return_dict_in_generate=__snake_case , )
return {"sequences": outputs["sequences"]}
snake_case = [[2, 0], [1_02, 1_03]]
snake_case = [[1, 0], [1, 1]]
snake_case = DummyModel(model=__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__snake_case , __snake_case , signatures={"""serving_default""": dummy_model.serving} )
snake_case = tf.saved_model.load(__snake_case ).signatures["""serving_default"""]
for batch_size in range(1 , len(__snake_case ) + 1 ):
snake_case = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
snake_case = serving_func(**__snake_case )["""sequences"""]
snake_case = test_model.generate(**__snake_case , max_new_tokens=__snake_case )
tf.debugging.assert_equal(__snake_case , __snake_case )
@slow
def lowerCAmelCase ( self : Optional[Any] )-> str:
# TF-only test: tf.saved_model export
snake_case = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case = 1
snake_case = 2
class DummyModel(tf.Module):
"""simple docstring"""
def __init__( self : Tuple , __snake_case : Tuple )-> Optional[int]:
super(__snake_case , self ).__init__()
snake_case = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__snake_case , )
            def serving(self, input_ids, attention_mask):
snake_case = self.model.generate(
input_ids=__snake_case , attention_mask=__snake_case , max_new_tokens=__snake_case , return_dict_in_generate=__snake_case , )
return {"sequences": outputs["sequences"]}
snake_case = [[2], [1_02, 1_03]]
snake_case = [[1], [1, 1]]
snake_case = DummyModel(model=__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__snake_case , __snake_case , signatures={"""serving_default""": dummy_model.serving} )
snake_case = tf.saved_model.load(__snake_case ).signatures["""serving_default"""]
for input_row in range(len(__snake_case ) ):
snake_case = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
snake_case = serving_func(**__snake_case )["""sequences"""]
snake_case = test_model.generate(**__snake_case , max_new_tokens=__snake_case )
tf.debugging.assert_equal(__snake_case , __snake_case )
@slow
@require_tensorflow_text
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__snake_case )
class CompleteSentenceTransformer(tf.keras.layers.Layer):
"""simple docstring"""
def __init__( self : Dict )-> Any:
super().__init__()
snake_case = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__snake_case , """spiece.model""" ) , """rb""" ).read() )
snake_case = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowerCAmelCase ( self : int , __snake_case : Any , *__snake_case : str , **__snake_case : int )-> str:
snake_case = self.tokenizer.tokenize(__snake_case )
snake_case , snake_case = text.pad_model_inputs(
__snake_case , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
snake_case = self.model.generate(input_ids=__snake_case , attention_mask=__snake_case )
return self.tokenizer.detokenize(__snake_case )
snake_case = CompleteSentenceTransformer()
snake_case = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
snake_case = complete_model(__snake_case )
snake_case = tf.keras.Model(__snake_case , __snake_case )
keras_model.save(__snake_case )
def lowerCAmelCase ( self : Any )-> List[Any]:
# Has PT equivalent: this test relies on random sampling
snake_case = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
snake_case = 14
snake_case = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case = """Hello, my dog is cute and"""
snake_case = tokenizer(__snake_case , return_tensors="""tf""" )
snake_case = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
snake_case = 6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
snake_case = model.generate(**__snake_case , eos_token_id=__snake_case , **__snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
snake_case = [6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
snake_case = model.generate(**__snake_case , eos_token_id=__snake_case , **__snake_case )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowerCAmelCase ( self : Tuple )-> Tuple:
# Has PT equivalent: ample use of framework-specific code
snake_case = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
snake_case = """Hugging Face is a technology company based in New York and Paris."""
snake_case = bart_tokenizer(__snake_case , return_tensors="""tf""" ).input_ids
snake_case = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
snake_case = bart_model.generate(__snake_case ).numpy()
class FakeBart(TFBartForConditionalGeneration):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] , __snake_case : Any , __snake_case : List[Any]=None , **__snake_case : Tuple )-> Union[str, Any]:
return super().call(__snake_case , **__snake_case )
snake_case = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
snake_case = bart_model.generate(__snake_case , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(__snake_case , __snake_case ) )
class FakeEncoder(bart_model.model.encoder.__class__):
"""simple docstring"""
def lowerCAmelCase ( self : Optional[Any] , __snake_case : Union[str, Any] , **__snake_case : Union[str, Any] )-> Union[str, Any]:
return super().call(__snake_case , **__snake_case )
snake_case = FakeEncoder(bart_model.config , bart_model.model.shared )
snake_case = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
snake_case = bart_model.generate(__snake_case ).numpy()
with self.assertRaises(__snake_case ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__snake_case , foo="""bar""" )
| 353 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
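# Hedged usage sketch (added; assumes the openai/whisper-tiny checkpoint is
# reachable and `waveform` is a 16 kHz float array, both illustrative):
# from transformers import WhisperProcessor
# processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
# features = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")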
| 3 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k
def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
_SCREAMING_SNAKE_CASE = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
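# Example invocation (added; the script filename is an assumption, the flags are
# the ones declared by the argument parser above):
# python convert_blenderbot_checkpoint.py --src_path blenderbot-model.bin \
#     --save_dir hf_blenderbot --hf_config_json blenderbot-3b-config.json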
| 354 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
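# Worked examples (added for illustration):
assert multiplicative_persistence(39) == 3  # 39 -> 27 -> 14 -> 4
assert additive_persistence(199) == 3  # 199 -> 19 -> 10 -> 1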
| 3 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "bert"
def __init__( self : Union[str, Any] , __snake_case : Any=3_05_22 , __snake_case : List[str]=7_68 , __snake_case : int=12 , __snake_case : int=12 , __snake_case : Dict=30_72 , __snake_case : Any="gelu" , __snake_case : List[str]=0.1 , __snake_case : List[str]=0.1 , __snake_case : Optional[Any]=5_12 , __snake_case : Dict=2 , __snake_case : List[Any]=0.02 , __snake_case : int=1e-12 , __snake_case : Optional[Any]=0 , __snake_case : List[Any]="absolute" , __snake_case : List[Any]=True , __snake_case : List[Any]=None , **__snake_case : List[str] , )-> Optional[int]:
super().__init__(pad_token_id=__snake_case , **__snake_case )
snake_case = vocab_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = hidden_act
snake_case = intermediate_size
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = initializer_range
snake_case = layer_norm_eps
snake_case = position_embedding_type
snake_case = use_cache
snake_case = classifier_dropout
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Tuple )-> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
snake_case = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
snake_case = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
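# Minimal sketch (added; upstream these classes are named BertConfig and
# BertOnnxConfig, so a shrunken config for a quick smoke test would look like):
# tiny = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
# assert tiny.vocab_size == 30522  # unchanged default from __init__ above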
| 355 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx: int) -> list:
    embed = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx: int, cnt: int) -> list:
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx: int) -> list:
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final() -> list:
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 10_00
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 3 | 0 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = generate_all_combinations(n, k)
print_all_state(total_list)
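# Cross-check (added): the backtracking enumeration matches itertools in both
# content and order.
from itertools import combinations

assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]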
| 356 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"}
_SCREAMING_SNAKE_CASE = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_SCREAMING_SNAKE_CASE = {
"openbmb/cpm-ant-10b": 1024,
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> str:
snake_case = collections.OrderedDict()
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as reader:
snake_case = reader.readlines()
for index, token in enumerate(__lowerCAmelCase ):
snake_case = token.rstrip("""\n""" )
snake_case = index
return vocab
class WordpieceTokenizer(object):
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Union[str, Any]="<unk>" , __snake_case : Union[str, Any]=2_00 )-> List[str]:
snake_case = vocab
snake_case = unk_token
snake_case = max_input_chars_per_word
    def tokenize( self : Any , __snake_case : List[str] )-> List[Any]:
snake_case = list(__snake_case )
if len(__snake_case ) > self.max_input_chars_per_word:
return [self.unk_token]
snake_case = 0
snake_case = []
while start < len(__snake_case ):
snake_case = len(__snake_case )
snake_case = None
while start < end:
snake_case = """""".join(chars[start:end] )
if substr in self.vocab:
snake_case = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__snake_case )
snake_case = end
return sub_tokens
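    # Example (sketch, toy vocabulary): with vocab = {"a": 0, "ab": 1, "c": 2} and
    # unk_token = "<unk>", WordpieceTokenizer(vocab, "<unk>").tokenize("abc") matches
    # the longest prefix first and returns ["ab", "c"]; a character with no match in
    # the vocab falls back to ["<unk>"].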
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba word segmentation followed by greedy WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Remap the special space/newline tokens to their literal characters.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens
    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the special space/newline tokens before writing them out.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
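# Usage sketch (requires jieba and network access; the checkpoint name comes from
# the vocab map above):
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   tokenizer.tokenize("今天天气真好!")  # jieba word segmentation, then WordPiece per word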
| 3 | 0 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Answers a free-text question about an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the caller already packed {"image": ..., "question": ...} dicts.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
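# Usage sketch (the checkpoint name is an assumption; any VQA model works):
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="path/to/image.png", question="How many cats are there?", top_k=2)
# returns a list of {"score": float, "answer": str} dicts, highest score first.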
| 357 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target arrays.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 3 | 0 |
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
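# Examples (sketch): is_arithmetic_series([2, 4, 6]) -> True (constant gap of 2),
# is_arithmetic_series([3, 6, 12, 24]) -> False (the gaps grow), and
# arithmetic_mean([2, 4, 6]) -> 4.0, i.e. the plain sum / count.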
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(F"""{key}\n{value}\n""")
| 3 | 0 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
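# Each term of the Chudnovsky series contributes roughly 14 extra correct digits,
# which is why the loop above runs ceil(precision / 14) times; the final character
# of the Decimal string is sliced off because its rounding is not trustworthy.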
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 359 |
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self) -> None:
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self) -> None:
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self) -> None:
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self) -> None:
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 3 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    # NOTE: the human-readable keys of `times` below are reconstructed labels
    # (assumptions); only their uniqueness matters for the JSON report.
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 360 |
'''simple docstring'''
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value):
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    pass
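# Usage sketch:
#   linked_list = LinkedList()
#   for value in (1, 2, 3):
#       linked_list.insert(value)
#   str(linked_list)            # "1 2 3"
#   2 in linked_list            # True
#   linked_list.delete_value(2)
#   str(linked_list)            # "1 3"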
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 361 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """Configuration class for the MVP model."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 3 | 0 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
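# Example (sketch): manhattan_distance([1, 1], [2, 2]) -> 2.0, i.e. |1 - 2| + |1 - 2|;
# points of different dimensionality raise ValueError, and non-numeric entries
# raise TypeError via _validate_point.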
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )
            new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )
            new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))
    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 3 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
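# Note: e_2 is the modular inverse of pow(e_1, d, p), so the public tuple
# (key_size, e_1, e_2, p) can be shared freely while only the exponent d in the
# private tuple must stay secret.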
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
| 363 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, WavaVecaProcessor)
    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = WavaVecaProcessor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = WavaVecaFeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = WavaVecaProcessor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, WavaVecaProcessor)

    def test_processor_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = WavaVecaConfig(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create emtpy sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, WavaVecaProcessor)
def lowerCAmelCase ( self : int )-> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
snake_case = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
snake_case = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case , use_fast=__snake_case )
snake_case = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
snake_case = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Any )-> Tuple:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = False
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "AutoFeatureExtractor"
snake_case_ = "AutoTokenizer"
snake_case_ = False
try:
AutoConfig.register("""custom""" , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
AutoTokenizer.register(__snake_case , slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local classes.
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
snake_case = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ , """NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str )-> Union[str, Any]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Tuple:
snake_case = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] )-> Optional[Any]:
try:
delete_repo(token=cls._token , repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def lowerCAmelCase ( self : List[Any] )-> str:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor""" ) , push_to_hub=__snake_case , use_auth_token=self._token )
snake_case = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : Any )-> Optional[Any]:
snake_case = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case , """test-processor-org""" ) , push_to_hub=__snake_case , use_auth_token=self._token , organization="""valid_org""" , )
snake_case = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case , getattr(new_processor.feature_extractor , __snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def lowerCAmelCase ( self : List[str] )-> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
snake_case = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case = os.path.join(__snake_case , """vocab.txt""" )
with open(__snake_case , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
snake_case = CustomTokenizer(__snake_case )
snake_case = CustomProcessor(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
snake_case = Repository(__snake_case , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case , """tokenizer_config.json""" ) ) as f:
snake_case = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config["""auto_map"""] , {
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case , """custom_processing.py""" ) ) )
repo.push_to_hub()
snake_case = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
| 3 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    """Configuration class for the Data2Vec vision model."""

    model_type = "data2vec-vision"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
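# Usage sketch: the ONNX export config above declares a single NCHW image input,
# i.e. pixel_values of shape (batch, num_channels, height, width), and asks the
# exporter to validate outputs to an absolute tolerance of 1e-4.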
| 364 |
'''simple docstring'''
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
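# Example (sketch): in the first demo graph above, vertex 4 hangs off vertex 3 alone,
# so (3, 4) is a bridge -- removing it disconnects the graph; compute_bridges returns
# every such critical edge as a (u, v) tuple with u < v.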
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : int , *__snake_case : Optional[Any] , **__snake_case : Optional[Any] )-> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Any , *__snake_case : Union[str, Any] , **__snake_case : Optional[Any] )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__snake_case : str , **__snake_case : str )-> int:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : str , *__snake_case : Any , **__snake_case : Union[str, Any] )-> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Dict , *__snake_case : Tuple , **__snake_case : Dict )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__snake_case : Union[str, Any] , **__snake_case : Any )-> Any:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : Dict , *__snake_case : List[str] , **__snake_case : Dict )-> List[str]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__snake_case : Dict , **__snake_case : Dict )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[Any] , *__snake_case : List[str] , **__snake_case : Tuple )-> List[str]:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : str , *__snake_case : str , **__snake_case : Union[str, Any] )-> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *__snake_case : Union[str, Any] , **__snake_case : Tuple )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__snake_case : Dict , **__snake_case : Optional[Any] )-> List[str]:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : Any )-> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *__snake_case : int , **__snake_case : Any )-> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__snake_case : List[Any] , **__snake_case : Tuple )-> List[Any]:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : Dict , *__snake_case : Optional[Any] , **__snake_case : Optional[int] )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Dict , *__snake_case : Dict , **__snake_case : Optional[Any] )-> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *__snake_case : List[str] , **__snake_case : str )-> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : Optional[int] , *__snake_case : Dict , **__snake_case : Dict )-> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *__snake_case : Optional[Any] , **__snake_case : Optional[Any] )-> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Optional[int] , *__snake_case : Dict , **__snake_case : Dict )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : List[str] , *__snake_case : int , **__snake_case : Tuple )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__snake_case : Union[str, Any] , **__snake_case : Tuple )-> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : str , *__snake_case : Dict , **__snake_case : Dict )-> Dict:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : Optional[int] , *__snake_case : Optional[Any] , **__snake_case : List[str] )-> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *__snake_case : Union[str, Any] , **__snake_case : Optional[Any] )-> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : int , *__snake_case : Optional[Any] , **__snake_case : int )-> int:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : List[str] , *__snake_case : List[str] , **__snake_case : str )-> List[str]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Dict , *__snake_case : List[Any] , **__snake_case : List[str] )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : str , *__snake_case : List[str] , **__snake_case : Optional[int] )-> int:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : Optional[int] , *__snake_case : Any , **__snake_case : str )-> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Tuple , *__snake_case : Tuple , **__snake_case : Optional[Any] )-> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : int , *__snake_case : List[str] , **__snake_case : Any )-> Dict:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : str , *__snake_case : Dict , **__snake_case : Optional[int] )-> str:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : str , *__snake_case : Optional[int] , **__snake_case : Tuple )-> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : List[str] , *__snake_case : Optional[int] , **__snake_case : Tuple )-> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class _lowerCAmelCase ( metaclass=A__ ):
"""simple docstring"""
snake_case_ = ["flax"]
def __init__( self : Union[str, Any] , *__snake_case : Tuple , **__snake_case : Dict )-> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : Union[str, Any] , *__snake_case : Optional[int] , **__snake_case : Dict )-> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def lowerCAmelCase ( cls : List[Any] , *__snake_case : int , **__snake_case : Union[str, Any] )-> int:
requires_backends(cls , ["""flax"""] )
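# The block above is diffusers' generated "dummy objects" file: every Flax class is
# replaced by a stub that raises as soon as it is touched when the flax backend is
# missing. A minimal self-contained sketch of that pattern follows; DummyObject and
# requires_backends here are simplified stand-ins, not the library's real helpers.
class DummyObject(type):
    """Metaclass that turns class-level attribute access into a backend check."""

    def __getattr__(cls, name):
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    # the real helper checks importability; here we assume the backend is absent
    name = obj.__name__ if hasattr(obj, "__name__") else type(obj).__name__
    raise ImportError(f"{name} requires the {backends} backend(s), which are not installed.")


class FlaxStubPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


try:
    FlaxStubPipeline()
except ImportError as err:
    print(err)  # FlaxStubPipeline requires the ['flax'] backend(s), which are not installed.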
| 365 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : str ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> int:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Any ) -> List[str]:
snake_case = SEWConfig()
if is_finetuned:
snake_case = model.wav_encoder.wav_model.cfg
else:
snake_case = model.cfg
snake_case = fs_config.conv_bias
snake_case = eval(fs_config.conv_feature_layers )
snake_case = [x[0] for x in conv_layers]
snake_case = [x[1] for x in conv_layers]
snake_case = [x[2] for x in conv_layers]
snake_case = """gelu"""
snake_case = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
snake_case = 0.0
snake_case = fs_config.activation_fn.name
snake_case = fs_config.encoder_embed_dim
snake_case = 0.02
snake_case = fs_config.encoder_ffn_embed_dim
snake_case = 1e-5
snake_case = fs_config.encoder_layerdrop
snake_case = fs_config.encoder_attention_heads
snake_case = fs_config.conv_pos_groups
snake_case = fs_config.conv_pos
snake_case = len(__lowerCAmelCase )
snake_case = fs_config.encoder_layers
snake_case = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
snake_case = model.cfg
snake_case = fs_config.final_dropout
snake_case = fs_config.layerdrop
snake_case = fs_config.activation_dropout
snake_case = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
snake_case = fs_config.attention_dropout
snake_case = fs_config.dropout_input
snake_case = fs_config.dropout
snake_case = fs_config.mask_channel_length
snake_case = fs_config.mask_channel_prob
snake_case = fs_config.mask_length
snake_case = fs_config.mask_prob
snake_case = """Wav2Vec2FeatureExtractor"""
snake_case = """Wav2Vec2CTCTokenizer"""
return config
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : int=None , __lowerCAmelCase : str=True ) -> Any:
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
snake_case = SEWConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = convert_config(model[0] , __lowerCAmelCase )
snake_case = model[0].eval()
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important: change the bos & pad token ids, since the CTC blank symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = SEWForCTC(__lowerCAmelCase )
else:
snake_case = SEWModel(__lowerCAmelCase )
feature_extractor.save_pretrained(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
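# A hypothetical invocation of the conversion script above, shown as a subprocess call
# so the block stays in Python. Every path below is a placeholder, not a released
# checkpoint, and the script name assumes it was saved as convert_sew_checkpoint.py.
import subprocess

subprocess.run(
    [
        "python", "convert_sew_checkpoint.py",
        "--checkpoint_path", "./sew_fairseq/checkpoint_best.pt",
        "--pytorch_dump_folder_path", "./sew-hf",
        "--dict_path", "./sew_fairseq/dict.ltr.txt",
        "--is_finetuned",
    ],
    check=True,  # raise if the conversion exits non-zero
)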
| 3 | 0 |
'''simple docstring'''
import itertools
import math
def __lowerCamelCase ( __lowerCAmelCase : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCamelCase ( ):
snake_case = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def __lowerCamelCase ( __lowerCAmelCase : int = 1_00_01 ):
return next(itertools.islice(prime_generator() , nth - 1 , __lowerCAmelCase ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = KandinskyVaaControlnetImgaImgPipeline
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = ["image_embeds", "negative_image_embeds", "image", "hint"]
snake_case_ = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case_ = False
@property
def lowerCAmelCase ( self : Dict )-> str:
return 32
@property
def lowerCAmelCase ( self : int )-> List[str]:
return 32
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return self.time_input_dim
@property
def lowerCAmelCase ( self : Optional[Any] )-> Any:
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : str )-> Union[str, Any]:
return 1_00
@property
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
snake_case = {
"""in_channels""": 8,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCAmelCase ( self : List[Any] )-> str:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : str )-> List[str]:
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase ( self : int )-> Dict:
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : str , __snake_case : Tuple=0 )-> List[Any]:
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert("""RGB""" ).resize((2_56, 2_56) )
# create hint
snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith("""mps""" ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case = """cpu"""
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
snake_case = np.array(
[0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : List[Any] )-> Optional[int]:
snake_case = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
snake_case = init_image.resize((5_12, 5_12) )
snake_case = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
snake_case = torch.from_numpy(np.array(__snake_case ) ).float() / 2_55.0
snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
snake_case = """A robot, 4k photo"""
snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device="""cpu""" ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , image=__snake_case , strength=0.85 , generator=__snake_case , negative_prompt="""""" , ).to_tuple()
snake_case = pipeline(
image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , hint=__snake_case , generator=__snake_case , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type="""np""" , )
snake_case = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
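# The slow test above converts the depth hint from an HxWxC array into the 1xCxHxW
# float tensor the pipeline expects. A standalone sketch of that preprocessing,
# assuming any uint8 image (the helper name is an illustration, not library API):
import numpy as np
import torch


def image_to_hint(image_hwc: np.ndarray) -> torch.Tensor:
    """HxWxC uint8 image -> 1xCxHxW float tensor scaled to [0, 1]."""
    hint = torch.from_numpy(image_hwc).float() / 255.0
    return hint.permute(2, 0, 1).unsqueeze(0)


hint = image_to_hint(np.zeros((512, 512, 3), dtype=np.uint8))
assert hint.shape == (1, 3, 512, 512)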
| 3 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , __snake_case : Dict , __snake_case : List[Any]=13 , __snake_case : str=7 , __snake_case : Any=True , __snake_case : Dict=True , __snake_case : Tuple=True , __snake_case : Union[str, Any]=True , __snake_case : Optional[Any]=99 , __snake_case : Union[str, Any]=16 , __snake_case : Optional[Any]=36 , __snake_case : int=6 , __snake_case : Tuple=6 , __snake_case : List[str]=6 , __snake_case : Optional[int]=37 , __snake_case : Tuple="gelu" , __snake_case : Optional[int]=0.1 , __snake_case : List[str]=0.1 , __snake_case : str=5_12 , __snake_case : List[Any]=16 , __snake_case : Optional[int]=2 , __snake_case : Tuple=0.02 , __snake_case : int=3 , __snake_case : List[str]=4 , __snake_case : str=None , )-> int:
snake_case = parent
snake_case = batch_size
snake_case = seq_length
snake_case = is_training
snake_case = use_input_mask
snake_case = use_token_type_ids
snake_case = use_labels
snake_case = vocab_size
snake_case = embedding_size
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_hidden_groups
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = max_position_embeddings
snake_case = type_vocab_size
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = num_labels
snake_case = num_choices
snake_case = scope
def lowerCAmelCase ( self : Tuple )-> Any:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case = None
if self.use_input_mask:
snake_case = random_attention_mask([self.batch_size, self.seq_length] )
snake_case = None
if self.use_token_type_ids:
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case = None
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case = ids_tensor([self.batch_size] , self.num_choices )
snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCAmelCase ( self : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Any , __snake_case : Optional[int] , __snake_case : List[str] )-> int:
snake_case = AlbertModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
snake_case = model(__snake_case , token_type_ids=__snake_case )
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Optional[Any] )-> Any:
snake_case = AlbertForPreTraining(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , sentence_order_label=__snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCAmelCase ( self : Any , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : List[Any] )-> Any:
snake_case = AlbertForMaskedLM(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Dict , __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : str , __snake_case : str )-> Union[str, Any]:
snake_case = AlbertForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Dict , __snake_case : Any , __snake_case : Any , __snake_case : Dict , __snake_case : Tuple , __snake_case : str , __snake_case : List[str] , __snake_case : int )-> Any:
snake_case = self.num_labels
snake_case = AlbertForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : Dict , __snake_case : List[str] , __snake_case : List[Any] , __snake_case : Any , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Tuple )-> List[Any]:
snake_case = self.num_labels
snake_case = AlbertForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Tuple , __snake_case : Dict , __snake_case : str )-> Tuple:
snake_case = self.num_choices
snake_case = AlbertForMultipleChoice(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case = model(
__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Any )-> List[str]:
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( A__ , A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = True
def lowerCAmelCase ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Tuple=False )-> List[str]:
snake_case = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
if return_labels:
if model_class in get_values(__snake_case ):
snake_case = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case )
snake_case = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
return inputs_dict
def lowerCAmelCase ( self : Any )-> Any:
snake_case = AlbertModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowerCAmelCase ( self : int )-> List[str]:
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int )-> List[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__snake_case )
def lowerCAmelCase ( self : Optional[int] )-> Tuple:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__snake_case )
def lowerCAmelCase ( self : str )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(*__snake_case )
@slow
def lowerCAmelCase ( self : Optional[int] )-> List[str]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = AlbertModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Any )-> Tuple:
snake_case = AlbertModel.from_pretrained("""albert-base-v2""" )
snake_case = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case = model(__snake_case , attention_mask=__snake_case )[0]
snake_case = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __snake_case )
snake_case = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1e-4 ) )
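# One pattern worth isolating from the tester above is how multiple-choice inputs are
# built: each (batch, seq_len) tensor is repeated once per answer choice before being
# fed to AlbertForMultipleChoice. A standalone sketch of that expansion:
import torch

batch_size, seq_len, num_choices = 2, 7, 4
input_ids = torch.randint(0, 99, (batch_size, seq_len))

# unsqueeze adds a choice axis; expand repeats it without copying; contiguous
# materializes the copy so downstream reshapes are safe
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_len)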
| 367 |
'''simple docstring'''
def __lowerCamelCase ( __lowerCAmelCase : list , __lowerCAmelCase : list , __lowerCAmelCase : int ) -> list:
snake_case = len(__lowerCAmelCase )
snake_case = [[0] * n for i in range(__lowerCAmelCase )]
for i in range(__lowerCAmelCase ):
snake_case = y_points[i]
for i in range(2 , __lowerCAmelCase ):
for j in range(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
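# The snippet above is Neville's iterated interpolation with all locals collapsed into
# `snake_case`; a de-obfuscated sketch with assumed readable names, returning both the
# interpolated value and the full table:
def neville_interpolate(x_points: list, y_points: list, x0: float) -> list:
    """Evaluate the interpolating polynomial through (x_points, y_points) at x0."""
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]  # column 1 holds the raw samples
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


# linear data y = x + 5, so interpolating at x0 = 5 gives 10
assert neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0] == 10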
| 3 | 0 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_SCREAMING_SNAKE_CASE = logging.getLogger()
def __lowerCamelCase ( __lowerCAmelCase : Path , __lowerCAmelCase : list ) -> Union[str, Any]:
snake_case = """\n""".join(__lowerCAmelCase )
Path(__lowerCAmelCase ).open("""w""" ).writelines(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE = "patrickvonplaten/t5-tiny-random"
_SCREAMING_SNAKE_CASE = "sshleifer/bart-tiny-random"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
_SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : Any , __snake_case : Union[str, Any] )-> Any:
snake_case = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
snake_case = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
snake_case = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
_dump_articles(__snake_case , __snake_case )
snake_case = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
snake_case = """translation_en_to_de""" if model == T5_TINY else """summarization"""
snake_case = f'''
run_eval_search.py
{model}
{input_file_name}
{output_file_name}
--score_path {score_path}
--task {task}
--num_beams 2
--length_penalty 2.0
'''.split()
with patch.object(__snake_case , """argv""" , __snake_case ):
run_generate()
assert Path(__snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
self.run_eval_tester(__snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Optional[int] )-> Dict:
self.run_eval_tester(__snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCAmelCase ( self : Tuple , __snake_case : Dict )-> Any:
snake_case = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
snake_case = input_file_name.parent / """utest_output.txt"""
assert not output_file_name.exists()
snake_case = {
"""en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
"""de""": [
"""Maschinelles Lernen ist großartig, oder?""",
"""Ich esse gerne Bananen""",
"""Morgen ist wieder ein toller Tag!""",
],
}
snake_case = Path(self.get_auto_remove_tmp_dir() )
snake_case = str(tmp_dir / """scores.json""" )
snake_case = str(tmp_dir / """val.target""" )
_dump_articles(__snake_case , text["""en"""] )
_dump_articles(__snake_case , text["""de"""] )
snake_case = """translation_en_to_de""" if model == T5_TINY else """summarization"""
snake_case = f'''
run_eval_search.py
{model}
{str(__snake_case )}
{str(__snake_case )}
--score_path {score_path}
--reference_path {reference_path}
--task {task}
'''.split()
testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
with patch.object(__snake_case , """argv""" , __snake_case ):
with CaptureStdout() as cs:
run_search()
snake_case = [""" num_beams | length_penalty""", model, """Best score args"""]
snake_case = ["""Info"""]
if "translation" in task:
expected_strings.append("""bleu""" )
else:
expected_strings.extend(__snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(__snake_case ).exists()
os.remove(Path(__snake_case ) )
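# The tests above never shell out: they build a synthetic argv and patch sys.argv so
# the scripts' entry points parse it in-process. A minimal sketch of that pattern;
# the toy main() here is a stand-in assumption, not the real run_generate()/run_search().
import sys
from unittest.mock import patch


def main():
    # stand-in for an entry point that reads sys.argv internally
    print("parsed args:", sys.argv[1:])


with patch.object(sys, "argv", ["run_eval.py", "--num_beams", "2", "--length_penalty", "2.0"]):
    main()  # parsed args: ['--num_beams', '2', '--length_penalty', '2.0']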
| 368 |
'''simple docstring'''
_SCREAMING_SNAKE_CASE = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_SCREAMING_SNAKE_CASE = ["a", "b", "c", "d", "e"]
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> Optional[int]:
snake_case = start
# add current to visited
visited.append(__lowerCAmelCase )
snake_case = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# once all neighbors are visited, add current to the sort
sort.append(__lowerCAmelCase )
# if not all vertices have been visited, pick an unvisited one and continue
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
for vertice in vertices:
if vertice not in visited:
snake_case = topological_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# return sort
return sort
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = topological_sort("a", [], [])
print(sort)
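# A de-obfuscated sketch of the routine above with assumed readable names. Note it is
# a DFS post-order: with the module-level graph it emits the reverse of a conventional
# topological order, exactly as the original does.
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list, sort: list) -> list:
    current = start
    visited.append(current)  # mark current as visited
    for neighbor in edges[current]:
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    sort.append(current)  # all neighbors done: emit current
    # restart from any vertex the DFS has not reached yet
    if len(visited) != len(vertices):
        for vertex in vertices:
            if vertex not in visited:
                sort = topological_sort(vertex, visited, sort)
    return sort


print(topological_sort("a", [], []))  # ['c', 'd', 'e', 'b', 'a']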
| 3 | 0 |
'''simple docstring'''
import requests
_SCREAMING_SNAKE_CASE = "YOUR API KEY"
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : str = giphy_api_key ) -> list:
snake_case = """+""".join(query.split() )
snake_case = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
snake_case = requests.get(__lowerCAmelCase ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 369 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
_SCREAMING_SNAKE_CASE = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = "sshleifer/student_marian_en_ro_6_1"
_SCREAMING_SNAKE_CASE = "sshleifer/tiny-mbart"
@require_torch
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
def lowerCAmelCase ( self : int , __snake_case : List[str]=False , __snake_case : List[Any]=None , __snake_case : Optional[int]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Tuple=True , )-> Tuple:
snake_case = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=__snake_case , num_train_epochs=1 , distributed=__snake_case , extra_args_str=__snake_case , predict_with_generate=__snake_case , do_train=__snake_case , do_eval=__snake_case , do_predict=__snake_case , )
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
if not do_eval:
return
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase ( self : Tuple )-> int:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase ( self : Union[str, Any] )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case )
@require_torch_multi_gpu
def lowerCAmelCase ( self : str )-> List[Any]:
self.run_seqaseq_quick(distributed=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> Dict:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : int )-> str:
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__snake_case )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase ( self : Any )-> List[Any]:
self.run_seqaseq_quick(
distributed=__snake_case , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__snake_case )
@require_apex
@require_torch_gpu
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
# XXX: apex breaks the trainer if it's run twice, e.g. run_seq2seq.main() from the same
# program, and it breaks other tests that run from the same pytest worker; therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu, we will need to make a special test
#
# specifically, the problem was traced to self.optimizer.step() - if it's run a 2nd time via
# a 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__snake_case , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCAmelCase ( self : List[str] , __snake_case : str )-> Optional[Any]:
# as each sub-test is slow-ish, split into multiple sub-tests to avoid CI timeout
snake_case = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
snake_case = experiments[experiment_id]
snake_case = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
snake_case = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**__snake_case , extra_args_str=data["""extra_args_str"""] )
snake_case = len(re.findall(__snake_case , cl.err ) )
self.assertEqual(__snake_case , data["""n_matches"""] )
@slow
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=10 , distributed=__snake_case , )
# Check metrics
snake_case = TrainerState.load_from_json(os.path.join(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = [log for log in logs if """eval_loss""" in log.keys()]
snake_case = eval_metrics[0]
snake_case = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __snake_case )
# test if do_predict saves generations and metrics
snake_case = os.listdir(__snake_case )
snake_case = {os.path.basename(__snake_case ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase ( self : str )-> Any:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(__snake_case : str ) -> Tuple[int, float]:
snake_case = """--skip_memory_metrics 0"""
snake_case = self.run_trainer(
max_len=1_28 , model_name=__snake_case , learning_rate=3e-4 , num_train_epochs=1 , optim=__snake_case , distributed=__snake_case , extra_args_str=__snake_case , do_eval=__snake_case , do_predict=__snake_case , n_gpus_to_use=1 , )
# Check metrics
snake_case = TrainerState.load_from_json(Path(__snake_case , """trainer_state.json""" ) ).log_history
snake_case = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
snake_case = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
snake_case = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case , snake_case , snake_case = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
snake_case = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
__snake_case , __snake_case , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
__snake_case , __snake_case , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : str , __snake_case : int , __snake_case : float = 3e-3 , __snake_case : str = "adafactor" , __snake_case : bool = False , __snake_case : str = None , __snake_case : int = 0 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = True , __snake_case : int = None , )-> Dict:
snake_case = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case = self.get_auto_remove_tmp_dir()
snake_case = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__snake_case )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__snake_case )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
snake_case = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__snake_case )}
'''.split()
snake_case = """
--do_predict
""".split()
snake_case = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case = get_gpu_count()
snake_case = get_torch_dist_unique_port()
snake_case = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
snake_case = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__snake_case , env=self.get_env() )
else:
snake_case = ["""run_translation.py"""] + args
with patch.object(__snake_case , """argv""" , __snake_case ):
main()
return output_dir
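# The ~150MB expectation in the BNB test above is plain arithmetic over the ~25M
# quantizable parameters; a quick check of that estimate (Adam keeps 8 bytes of
# optimizer state per parameter, the 8-bit BNB optimizer keeps 2):
params = 25_000_000
savings_mb = params * (8 - 2) / 2**20
print(f"~{savings_mb:.0f} MB saved")  # ~143 MB, hence the 120MB assertion margin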
| 3 | 0 |
'''simple docstring'''
import functools
from typing import Any
def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : list[str] ) -> bool:
# Validation
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0:
raise ValueError("""the string should be not empty string""" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) > 0 for item in words ):
raise ValueError("""the words should be a list of non-empty strings""" )
# Build trie
snake_case = {}
snake_case = """WORD_KEEPER"""
for word in words:
snake_case = trie
for c in word:
if c not in trie_node:
snake_case = {}
snake_case = trie_node[c]
snake_case = True
snake_case = len(__lowerCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(__lowerCAmelCase : int ) -> bool:
if index == len_string:
return True
snake_case = trie
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
snake_case = trie_node.get(string[i] , __lowerCAmelCase )
if trie_node is None:
return False
if trie_node.get(__lowerCAmelCase , __lowerCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
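# A de-obfuscated sketch of the word-break routine above (trie construction plus a
# memoized DFS over string indices), with assumed readable names:
import functools


def word_break(string: str, words: list[str]) -> bool:
    """Return True if `string` can be segmented into words from `words`."""
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be a non-empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # build the trie; a sentinel key marks the end of a complete word
    trie: dict = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            trie_node = trie_node.setdefault(c, {})
        trie_node[word_keeper_key] = True

    len_string = len(string)

    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


assert word_break("applepenapple", ["apple", "pen"]) is True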
| 370 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def __lowerCamelCase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict ) -> int:
for attribute in key.split(""".""" ):
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
snake_case = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
snake_case = value
elif weight_type == "weight_g":
snake_case = value
elif weight_type == "weight_v":
snake_case = value
elif weight_type == "bias":
snake_case = value
else:
snake_case = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> str:
snake_case = []
snake_case = fairseq_model.state_dict()
snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
snake_case = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
snake_case = True
else:
for key, mapped_key in MAPPING.items():
snake_case = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
snake_case = True
if "*" in mapped_key:
snake_case = name.split(__lowerCAmelCase )[0].split(""".""" )[-2]
snake_case = mapped_key.replace("""*""" , __lowerCAmelCase )
if "weight_g" in name:
snake_case = """weight_g"""
elif "weight_v" in name:
snake_case = """weight_v"""
elif "weight" in name:
snake_case = """weight"""
elif "bias" in name:
snake_case = """bias"""
else:
snake_case = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def __lowerCamelCase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Any , __lowerCAmelCase : Tuple , __lowerCAmelCase : Any ) -> List[str]:
snake_case = full_name.split("""conv_layers.""" )[-1]
snake_case = name.split(""".""" )
snake_case = int(items[0] )
snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
snake_case = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def __lowerCamelCase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Dict=True ) -> List[Any]:
if config_path is not None:
snake_case = HubertConfig.from_pretrained(__lowerCAmelCase )
else:
snake_case = HubertConfig()
if is_finetuned:
if dict_path:
snake_case = Dictionary.load(__lowerCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
snake_case = target_dict.pad_index
snake_case = target_dict.bos_index
snake_case = target_dict.eos_index
snake_case = len(target_dict.symbols )
snake_case = os.path.join(__lowerCAmelCase , """vocab.json""" )
if not os.path.isdir(__lowerCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) )
return
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , __lowerCAmelCase )
snake_case = WavaVecaCTCTokenizer(
__lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , )
snake_case = True if config.feat_extract_norm == """layer""" else False
snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , )
snake_case = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
snake_case = HubertForCTC(__lowerCAmelCase )
else:
snake_case = HubertModel(__lowerCAmelCase )
if is_finetuned:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
snake_case = model[0].eval()
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
hf_wavavec.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
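# Illustrative invocation (the script filename and paths are assumptions; the
# flags match the argparse definition above):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/hubert.pt \
#       --pytorch_dump_folder_path ./hubert-converted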
| 3 | 0 |
'''simple docstring'''
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
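    # Illustration (added): the strongly connected components come out in
    # reverse topological order for this demo graph.
    print("""strongly connected components:""", tarjan(g))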
| 371 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : Tuple )-> Optional[Any]:
snake_case = 0
def lowerCAmelCase ( self : str )-> Any:
snake_case = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> str:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[str] )-> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = CLIPConfig()
            # Create a dummy config file with image_processor_type
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case = AutoImageProcessor.from_pretrained(__snake_case ).to_dict()
config_dict.pop("""image_processor_type""" )
snake_case = CLIPImageProcessor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
snake_case = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def lowerCAmelCase ( self : int )-> Dict:
with self.assertRaisesRegex(
__snake_case , """clip-base is not a local folder and is not a valid model identifier""" ):
snake_case = AutoImageProcessor.from_pretrained("""clip-base""" )
def lowerCAmelCase ( self : Tuple )-> int:
with self.assertRaisesRegex(
__snake_case , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
snake_case = AutoImageProcessor.from_pretrained(__snake_case , revision="""aaaaaa""" )
def lowerCAmelCase ( self : str )-> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : List[str] )-> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def lowerCAmelCase ( self : List[str] )-> Dict:
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoImageProcessor.register(__snake_case , __snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case = Path(__snake_case ) / """preprocessor_config.json"""
snake_case = Path(__snake_case ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(__snake_case , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(__snake_case , """w""" ) )
snake_case = CustomImageProcessor.from_pretrained(__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__snake_case )
snake_case = AutoImageProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Dict )-> Optional[int]:
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = True
try:
AutoConfig.register("""custom""" , __snake_case )
AutoImageProcessor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
snake_case = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=__snake_case )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(__snake_case , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 3 | 0 |
'''simple docstring'''
def __get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at: int, parent: int, bridges: list[tuple[int, int]], id_: int) -> None:
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
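    # Illustration (added): demo graph 0 above has exactly three bridges.
    print(compute_bridges(__get_demo_graph(0)))  # [(3, 4), (2, 3), (2, 5)]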
| 350 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "Salesforce/blip-image-captioning-base"
snake_case_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
snake_case_ = "image_captioner"
snake_case_ = AutoModelForVisionaSeq
snake_case_ = ["image"]
snake_case_ = ["text"]
def __init__( self : Tuple , *__snake_case : Optional[int] , **__snake_case : Any )-> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__snake_case , **__snake_case )
def lowerCAmelCase ( self : str , __snake_case : "Image" )-> int:
return self.pre_processor(images=__snake_case , return_tensors="""pt""" )
def lowerCAmelCase ( self : Any , __snake_case : List[str] )-> Union[str, Any]:
return self.model.generate(**__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : Any )-> Dict:
return self.pre_processor.batch_decode(__snake_case , skip_special_tokens=__snake_case )[0].strip()
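# Hypothetical usage sketch (the class name and image path are assumptions, not
# part of the original file; PipelineTool instances are callable and chain
# encode -> forward -> decode):
# from PIL import Image
# captioner = ImageCaptioningTool()
# print(captioner(Image.open("""photo.jpg""")))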
| 3 | 0 |
'''simple docstring'''
import os
import platform
import sys
_SCREAMING_SNAKE_CASE = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 351 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __snake_case : Optional[Any] , __snake_case : List[Any]=7 , __snake_case : Optional[Any]=3 , __snake_case : str=18 , __snake_case : Union[str, Any]=30 , __snake_case : Union[str, Any]=4_00 , __snake_case : Optional[int]=True , __snake_case : Any=None , __snake_case : List[str]=True , )-> Optional[Any]:
snake_case = size if size is not None else {"""height""": 18, """width""": 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase ( self : int )-> Tuple:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Tuple )-> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Union[str, Any] )-> Any:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
self.assertTrue(hasattr(__snake_case , """size""" ) )
self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
def lowerCAmelCase ( self : List[str] )-> List[Any]:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Dict:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase ( self : int )-> List[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 3 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def __lowerCamelCase ( __lowerCAmelCase : List[str] ) -> Any:
snake_case = VideoMAEConfig()
set_architecture_configs(__lowerCAmelCase , __lowerCAmelCase )
if "finetuned" not in model_name:
snake_case = False
if "finetuned" in model_name:
snake_case = """huggingface/label-files"""
if "kinetics" in model_name:
snake_case = 4_00
snake_case = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
snake_case = 1_74
snake_case = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
snake_case = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ) -> Union[str, Any]:
if "small" in model_name:
snake_case = 3_84
snake_case = 15_36
snake_case = 12
snake_case = 16
snake_case = 12
snake_case = 3
snake_case = 1_92
snake_case = 7_68
elif "large" in model_name:
snake_case = 10_24
snake_case = 40_96
snake_case = 24
snake_case = 16
snake_case = 12
snake_case = 8
snake_case = 5_12
snake_case = 20_48
elif "huge" in model_name:
snake_case = 12_80
snake_case = 51_20
snake_case = 32
snake_case = 16
snake_case = 12
snake_case = 8
snake_case = 6_40
snake_case = 25_60
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def __lowerCamelCase ( __lowerCAmelCase : str ) -> Union[str, Any]:
if "encoder." in name:
snake_case = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
snake_case = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
snake_case = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
snake_case = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
snake_case = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
snake_case = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
snake_case = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
snake_case = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
snake_case = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
snake_case = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
snake_case = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
snake_case = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
snake_case = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
snake_case = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
snake_case = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
snake_case = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
snake_case = name.replace("""head""" , """classifier""" )
return name
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : str ) -> List[str]:
for key in orig_state_dict.copy().keys():
snake_case = orig_state_dict.pop(__lowerCAmelCase )
if key.startswith("""encoder.""" ):
snake_case = key.replace("""encoder.""" , """""" )
if "qkv" in key:
snake_case = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
snake_case = config.decoder_hidden_size
snake_case = int(key_split[2] )
snake_case = """decoder.decoder_layers."""
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = config.hidden_size
snake_case = int(key_split[1] )
snake_case = """videomae.encoder.layer."""
if "weight" in key:
snake_case = val[:dim, :]
snake_case = val[dim : dim * 2, :]
snake_case = val[-dim:, :]
else:
snake_case = val
return orig_state_dict
def __lowerCamelCase ( ) -> int:
snake_case = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
snake_case = np.load(__lowerCAmelCase )
return list(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] ) -> Optional[Any]:
snake_case = get_videomae_config(__lowerCAmelCase )
if "finetuned" in model_name:
snake_case = VideoMAEForVideoClassification(__lowerCAmelCase )
else:
snake_case = VideoMAEForPreTraining(__lowerCAmelCase )
# download original checkpoint, hosted on Google Drive
snake_case = """pytorch_model.bin"""
gdown.cached_download(__lowerCAmelCase , __lowerCAmelCase , quiet=__lowerCAmelCase )
    files = torch.load(__lowerCAmelCase , map_location="""cpu""" )
if "model" in files:
snake_case = files["""model"""]
else:
snake_case = files["""module"""]
snake_case = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# verify model on basic input
snake_case = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
snake_case = prepare_video()
snake_case = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
if "finetuned" not in model_name:
snake_case = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
snake_case = torch.load(__lowerCAmelCase )
snake_case = model(**__lowerCAmelCase )
snake_case = outputs.logits
    model_names = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case = torch.Size([1, 4_00] )
snake_case = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case = torch.Size([1, 14_08, 15_36] )
snake_case = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case = torch.Size([1, 1_74] )
snake_case = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __lowerCAmelCase , atol=1e-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __lowerCAmelCase , atol=1e-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
snake_case = outputs.loss
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1e-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(__lowerCAmelCase , organization="""nielsr""" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
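# Illustrative invocation (the script filename is an assumption; the flags are
# the ones defined in the argparse block above):
#   python convert_videomae_checkpoint.py --model_name videomae-base --push_to_hub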
| 352 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, """html.parser""")
    div = soup.find("""div""", attrs={"""class""": """gs_ri"""})
    anchors = div.find("""div""", attrs={"""class""": """gs_fl"""}).find_all("""a""")
    return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3 | 0 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_SCREAMING_SNAKE_CASE = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_SCREAMING_SNAKE_CASE = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_SCREAMING_SNAKE_CASE = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
return score
| 353 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = "WhisperFeatureExtractor"
snake_case_ = "WhisperTokenizer"
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("""audio""", None)
        sampling_rate = kwargs.pop("""sampling_rate""", None)
        text = kwargs.pop("""text""", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
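# Hypothetical usage sketch (the checkpoint id is an example and `waveform` is a
# placeholder audio array; not part of the original file):
# from transformers import WhisperProcessor
# processor = WhisperProcessor.from_pretrained("""openai/whisper-tiny""")
# inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="""pt""")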
| 3 | 0 |
'''simple docstring'''
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
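    # Illustration (added): 1 mol of an ideal gas at 300 K in a 10 L vessel is
    # about 2.46 atm, which the helper rounds to 2.
    print(moles_to_pressure(volume=10, moles=1, temperature=300))  # 2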
| 354 |
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""multiplicative_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""multiplicative_persistence() does not accept negative values""")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1

    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("""additive_persistence() only accepts integral values""")
    if num < 0:
        raise ValueError("""additive_persistence() does not accept negative values""")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1

    return steps
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
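    # Illustration (added): 39 -> 27 -> 14 -> 4 gives multiplicative
    # persistence 3; 39 -> 12 -> 3 gives additive persistence 2.
    print(multiplicative_persistence(39))  # 3
    print(additive_persistence(39))  # 2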
| 3 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 355 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any] ) -> Dict:
snake_case = []
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
F'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
F'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
F'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
F'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
F'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    """Return (hf_name, original_name) pairs for attention block `cnt` of stage `idx`."""
    attention_weights = []
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
F'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
F'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
F'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
F'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', F'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', F'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', F'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', F'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(F'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', F'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    """Return the (hf_name, original_name) pair for the cls token of stage `idx`."""
    token = []
token.append((F'''cvt.encoder.stages.{idx}.cls_token''', """stage2.cls_token""") )
return token
def final():
    """Return (hf_name, original_name) pairs for the final layernorm and classification head."""
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
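# The conversion driver: pick the CvtConfig variant from the model name,
# gather all rename pairs, copy the tensors across, and save the result.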
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to the Hugging Face format and save it."""
    num_labels = 1000
    img_labels_file = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
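# Hypothetical invocation (the script name and paths are illustrative):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24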
| 3 | 0 |