"""simple docstring"""
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number | (1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number & ~(1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return number ^ (1 << position)
def _A ( lowercase , lowercase ):
"""simple docstring"""
return ((number >> position) & 1) == 1
def _A ( lowercase , lowercase ):
"""simple docstring"""
return int((number & (1 << position)) != 0 )
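# Quick illustration (values chosen for this sketch): with number = 0b1010 (10),
#   set_bit(10, 0)    -> 11 (0b1011)
#   clear_bit(10, 1)  -> 8  (0b1000)
#   flip_bit(10, 2)   -> 14 (0b1110)
#   is_bit_set(10, 3) -> True
#   get_bit(10, 0)    -> 0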
if __name__ == "__main__":
import doctest
doctest.testmod() | 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
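# The dict above maps each submodule to its public names; the `_LazyModule`
# constructed at the bottom of this file uses it so that the heavy torch/TF
# modeling code is only imported on first attribute access.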
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=40,
        eos_token_id=2, pad_token_id=1, bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2],
            bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id, **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"][:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and attention_mask
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample",
            use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key), Write(key_text))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
from __future__ import annotations

import time

import numpy as np

test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum the currently allocated resources, column by column."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available: claim vector minus current allocations."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process remaining need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each need vector back to its original process index."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Run the Banker's algorithm and report whether the state is safe."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align display of the algorithm's tables."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
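    # A usage sketch (names as defined above; `describe=True` triggers the
    # pretty-printed tables via the **kwargs hook in `main`):
    # BankersAlgorithm(
    #     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    # ).main(describe=True)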
"""simple docstring"""
import math
def _snake_case ( lowercase__ : int = 1_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = sum(i * i for i in range(1 , n + 1 ) )
lowerCAmelCase_ :Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
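# The same result follows from the standard summation identities (a sketch, not
# part of the original solution): sum(1..n) = n(n+1)/2 and
# sum(i^2, 1..n) = n(n+1)(2n+1)/6, so no loop is needed.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares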
if __name__ == "__main__":
print(F"""{solution() = }""")
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""Credit card number validation using issuer prefixes and the Luhn checksum."""


def validate_initial_digits(credit_card_number: str) -> bool:
    """Reject numbers whose issuer prefix is not in the accepted set."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
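# Note (background, not from the original): these prefixes roughly correspond to
# common issuer ranges, e.g. 34/37 American Express, 35 JCB, 4 Visa, 5 Mastercard,
# and 6 Discover.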
def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over the digit string."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling results in a two-digit number, i.e. greater than 9
        # (e.g., 6 x 2 = 12), add the digits of the product
        # (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6) to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
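# Worked example (illustrative): for "4111111111111111" the doubled digits are
# 4 -> 8 and seven 1 -> 2 (sum 22); the untouched digits are eight 1s (sum 8);
# 22 + 8 = 30 and 30 % 10 == 0, so the Luhn check passes.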
def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a diagnosis of the given number and return its validity."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
def perfect_cube(n: int) -> bool:
    """Round the float cube root before cubing: without rounding, 27 ** (1 / 3)
    evaluates to 3.0000000000000004 and a plain equality check fails."""
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)
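# A float-free alternative (a sketch, not in the original): integer binary
# search avoids any precision loss for very large inputs.
def perfect_cube_binary_search(n: int) -> bool:
    """Exact perfect-cube check using only integer arithmetic."""
    n = abs(n)  # a negative number is a cube iff its magnitude is
    lo, hi = 0, max(n, 1)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False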
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    expected_output = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], expected_output, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
lowerCamelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt) | 86 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)
        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError("The input value cannot be less than 2")
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
args = parser.parse_args()
divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f"{args.num} is probably prime")
else:
quotient = args.num // divisor
print(f"{args.num} = {divisor} * {quotient}")
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
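# A quick usage sketch (illustrative values): T5Config(feed_forward_proj="gated-gelu")
# yields dense_act_fn == "gelu_new" via the backwards-compatibility remap above,
# and T5OnnxConfig adds past_key_values inputs when `use_past` is set.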
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # the mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for generate()."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting: no prefix, suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting: no prefix, suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
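
# --- Illustrative usage sketch (assumes network access to download the real
# facebook/mbart-large-en-ro sentencepiece model; treat as documentation) -----
if __name__ == "__main__":
    tok = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    enc = tok("UN Chief Says There Is No Plan to Stop War in Syria")
    # Source sentences get no prefix tokens and end with [</s>, en_XX],
    # which is exactly what set_src_lang_special_tokens() configures above.
    print(tok.convert_ids_to_tokens(enc["input_ids"])[-2:])  # ['</s>', 'en_XX']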
| 88 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings, **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')
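
# --- Worked example (illustrative): the inverse of [[4, 7], [2, 6]] is
# [[0.6, -0.7], [-0.2, 0.4]], since its determinant is 4*6 - 2*7 = 10. --------
if __name__ == "__main__":
    print(inverse_of_matrix([[4.0, 7.0], [2.0, 6.0]]))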
| 89 |
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "contact@muhammadumerfarooq.me"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Called whenever a start tag is encountered; collects href targets."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(url)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 | 0 |
class Graph:
    """Weighted undirected graph stored as an adjacency map."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Make all edge weights distinct so the minimum spanning tree is unique."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip('\n')

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given sets of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g
    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka(graph):
        """Boruvka's algorithm: returns a minimum spanning tree of the graph."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)

                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 90 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config; it will pick up CPU, GPU, or multi-GPU.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
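
# --- The launch pattern every test above shares, reduced to its core; the
# script path and flags here are illustrative, not a prescribed invocation. ---
def _example_launch(tmp_dir: str) -> None:
    testargs = f"""
        examples/pytorch/text-classification/run_glue_no_trainer.py
        --model_name_or_path distilbert-base-uncased
        --output_dir {tmp_dir}
        """.split()
    launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
    # run_command shells out and raises on a non-zero exit code.
    run_command(launch_args + testargs)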
| 0 | 0 |
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _A (__a , __a , __a=("train",) ) -> Any:
"""simple docstring"""
assert isinstance(__a , __a )
for split in splits:
SCREAMING_SNAKE_CASE_ : Any = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
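
# --- Outside the test harness, the same reader/writer paths boil down to a
# two-call round-trip (illustrative helper; `tmp_path` is any pathlib.Path): --
def _example_parquet_round_trip(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    ds.to_parquet(str(tmp_path / "demo.parquet"))  # same path ParquetDatasetWriter exercises
    reloaded = Dataset.from_parquet(str(tmp_path / "demo.parquet"))  # same path ParquetDatasetReader exercises
    assert reloaded.column_names == ["col_1", "col_2"]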
| 91 |
import math
def prime_sieve(n: int) -> list:
    """Returns a list of primes below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes
def solution(limit: int = 999_966_663_333) -> int:
    """Returns the sum of all semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
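
# --- Sanity check (illustrative): the Project Euler 234 statement reports
# 34825 as the sum of the 92 semidivisible numbers not exceeding 1000. --------
# print(solution(1000))  # expected: 34825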
| 0 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
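
# --- Illustrative usage; IIRFilter.process is assumed to filter one sample at
# a time, as in the accompanying audio_filters.iir_filter module. -------------
if __name__ == "__main__":
    lowpass = make_lowpass(1_000, 48_000)
    filtered = [lowpass.process(sample / 100) for sample in range(100)]
    print(filtered[:3])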
| 92 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 0 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1_024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
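
# --- Illustrative usage of the config above ----------------------------------
if __name__ == "__main__":
    config = LiltConfig(channel_shrink_ratio=2)
    print(config.model_type, config.hidden_size, config.channel_shrink_ratio)  # lilt 768 2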
| 93 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # Framework not provided - use the environment to determine it
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
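
# --- What determine_framework resolves, in order (illustrative; requires the
# relevant frameworks installed): explicit argument, then local checkpoint
# contents, then whichever of torch/TF is importable, with torch preferred. ---
if __name__ == "__main__":
    print(FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER))  # 'pt' when PyTorch is installed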
| 0 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion( self , seed , expected_slice , expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16( self , seed , expected_slice ):
        model = self.get_sd_vae_model(fpaa=True )
        image = self.get_sd_image(seed , fpaa=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode( self , seed , expected_slice , expected_slice_mps ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode( self , seed , expected_slice ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16( self , seed , expected_slice ):
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16( self , seed ):
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0( self , seed ):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample( self , seed , expected_slice ):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3e-3 if torch_device != '''mps''' else 1e-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
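# Illustrative sketch (added; not part of the original test file): the gradient
# checkpointing test above boils down to this pattern -- run identical weights
# with and without activation checkpointing and require matching parameter
# gradients. Assumes a recent PyTorch (`use_reentrant` kwarg); defined but not
# executed on import.
def _gradient_checkpointing_parity_sketch():
    import copy
    import torch
    from torch import nn
    from torch.utils.checkpoint import checkpoint

    torch.manual_seed(0)
    plain = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 1))
    ckpt = copy.deepcopy(plain)  # identical weights
    x = torch.randn(4, 8)
    out_plain = plain(x).sum()
    # recompute the first layer's activations during the backward pass
    h = checkpoint(ckpt[0], x, use_reentrant=False)
    out_ckpt = ckpt[2](ckpt[1](h)).sum()
    out_plain.backward()
    out_ckpt.backward()
    for p_a, p_b in zip(plain.parameters(), ckpt.parameters()):
        assert torch.allclose(p_a.grad, p_b.grad, atol=5e-5)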
| 94 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
super().setUp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self : List[str] , tokenizer : str ) ->Dict:
        """simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
return input_text, output_text
    def test_full_tokenizer( self : Optional[int] ) ->Optional[Any]:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese( self : int ) ->Any:
        """simple docstring"""
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower( self : Any ) ->int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false( self : Union[str, Any] ) ->Optional[int]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true( self : Dict ) ->str:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default( self : Any ) ->Dict:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower( self : Tuple ) ->Optional[Any]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self : Tuple ) ->Tuple:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self : int ) ->Optional[int]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens( self : Any ) ->int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer( self : Union[str, Any] ) ->int:
        """simple docstring"""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
    def test_prepare_batch( self : int ) ->int:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
        batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control( self : Any ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation( self : List[Any] ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
    def test_sequence_builders( self : List[str] ) ->List[str]:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
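# Reference sketch (added for illustration; not part of the original test file)
# of the greedy longest-match-first WordPiece algorithm that the
# WordpieceTokenizer tests above exercise.
def _wordpiece_sketch(word , vocab , unk_token='''[UNK]''' ):
    tokens, start = [], 0
    while start < len(word ):
        end, cur_substr = len(word ), None
        while start < end:
            substr = ('''##''' if start > 0 else '''''') + word[start:end]
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1
        if cur_substr is None:
            return [unk_token]  # no prefix of the remainder is in the vocab
        tokens.append(cur_substr )
        start = end
    return tokens
assert _wordpiece_sketch('''unwanted''' , {'''un''', '''##want''', '''##ed'''} ) == ['''un''', '''##want''', '''##ed''']
assert _wordpiece_sketch('''unwantedX''' , {'''un''', '''##want''', '''##ed'''} ) == ['''[UNK]''']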
| 0 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class __lowerCAmelCase ( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens : List[int] = []
    suffix_tokens : List[int] = []
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , tokenizer_file=None , src_lang=None , tgt_lang=None , sp_model_kwargs = None , additional_special_tokens=None , **kwargs , ) -> List[str]:
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , tokenizer_file=tokenizer_file , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys() )
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        state['''sp_model_proto'''] = self.sp_model.serialized_model_proto()
return state
    def __setstate__( self , d ) -> str:
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    @property
    def vocab_size( self ) -> int:
        '''simple docstring'''
        return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang( self ) -> str:
        '''simple docstring'''
        return self._src_lang
@src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
    def get_special_tokens_mask( self , token_ids_a , token_ids_b = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_b is None:
            return prefix_ones + ([0] * len(token_ids_a )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a )) + ([0] * len(token_ids_b )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> Union[str, Any]:
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
return inputs
    def get_vocab( self ) -> Dict:
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
    def _tokenize( self , text ) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ) -> Optional[int]:
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token( self , index ) -> Optional[Any]:
        '''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ) -> Dict:
        '''simple docstring'''
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
return (out_vocab_file,)
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = "en_XX" , lowerCAmelCase__ = None , lowerCAmelCase__ = "ro_RO" , **lowerCAmelCase__ , ) -> BatchEncoding:
'''simple docstring'''
a__ : Union[str, Any] =src_lang
a__ : str =tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
    def _switch_to_input_mode( self ) -> int:
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> Tuple:
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
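# Worked illustration (added; mirrors the alignment comment in __init__): the
# 4 reserved fairseq specials come first, so every raw SentencePiece id is
# shifted by fairseq_offset = 1, and language codes land right after the
# SentencePiece vocabulary. Defined but not executed on import.
def _fairseq_alignment_sketch(sp_vocab_size=10 , fairseq_offset=1 ):
    unk_token_id = 3
    def spm_to_fairseq(spm_id ):
        # SentencePiece id 0 is <unk>; everything else shifts up by the offset.
        return spm_id + fairseq_offset if spm_id else unk_token_id
    lang_code_to_id = {
        code: sp_vocab_size + i + fairseq_offset
        for i, code in enumerate(["en_XX", "ro_RO"] )
    }
    assert spm_to_fairseq(5 ) == 6
    assert lang_code_to_id["ro_RO"] == 12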
| 95 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
            NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
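# Minimal illustration (added; a simplification, not transformers' actual
# _LazyModule): PEP 562 lets a module define __getattr__ so heavy submodules
# are imported only when one of their symbols is first accessed.
def _lazy_getattr_sketch(name):
    import importlib
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module("." + module_name, __name__), name)
    raise AttributeError(name)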
| 0 | 0 |
"""simple docstring"""
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : List[str] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : str = 0
while b > 0:
if b & 1:
_lowerCamelCase : List[Any] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res | 96 |
def gray_code(bit_count: int ) -> list:
    """
    >>> gray_code(2)
    [0, 1, 3, 2]
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string(bit_count: int ) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
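    # Quick property check (added for illustration): consecutive Gray codes
    # differ in exactly one bit, which the reflect-and-prefix construction
    # above guarantees.
    codes = gray_code(4 )
    assert all(bin(x ^ y ).count("1" ) == 1 for x, y in zip(codes , codes[1:] ) )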
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model( self ):
'''simple docstring'''
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
        features = {
'''input_ids''': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
        output = model(features )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 97 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self : List[Any] ) ->Tuple:
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
# fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self : List[Any] , **kwargs : List[Any] ) ->int:
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self : Union[str, Any] , **kwargs : Optional[int] ) ->Union[str, Any]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self : Tuple ) ->Any:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self : str ) ->Dict:
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
    def test_save_load_pretrained_default( self : Any ) ->Tuple:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self : int ) ->Dict:
        """simple docstring"""
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self : Tuple ) ->Dict:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self : List[str] ) ->str:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self : List[Any] ) ->List[str]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
processor()
    def test_tokenizer_decode( self : Optional[int] ) ->List[str]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self : Optional[Any] ) ->Dict:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
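# Conceptual sketch (added; a simplification, not the real
# VisionTextDualEncoderProcessor): the processor pattern under test routes
# `text` to the tokenizer, `images` to the image processor, and merges the
# two feature dicts.
class _ToyProcessorSketch:
    def __init__(self , tokenizer , image_processor ):
        self.tokenizer = tokenizer
        self.image_processor = image_processor
    def __call__(self , text=None , images=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images.''' )
        encoding = dict(self.tokenizer(text , **kwargs ) ) if text is not None else {}
        if images is not None:
            encoding.update(self.image_processor(images , **kwargs ) )
        return encoding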
| 0 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
class snake_case ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["pixel_values"]
    def __init__( self : Tuple ,do_resize : bool = True ,size : Optional[Dict[str, int]] = None ,resample : PILImageResampling = PILImageResampling.BILINEAR ,do_center_crop : bool = True ,crop_size : Dict[str, int] = None ,do_rescale : bool = True ,rescale_factor : Union[int, float] = 1 / 255 ,do_normalize : bool = True ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,**kwargs ,):
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 256}
        size = get_size_dict(size ,default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size ,param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self : int ,image : np.ndarray ,size : Dict[str, int] ,resample : PILImageResampling = PILImageResampling.BICUBIC ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        size = get_size_dict(size ,default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image ,size=size['shortest_edge'] ,default_to_square=False )
        return resize(image ,size=output_size ,resample=resample ,data_format=data_format ,**kwargs )
    def center_crop( self : str ,image : np.ndarray ,size : Dict[str, int] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}''' )
        return center_crop(image ,size=(size['height'], size['width']) ,data_format=data_format ,**kwargs )
    def rescale( self : Optional[int] ,image : np.ndarray ,scale : float ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ):
        return rescale(image ,scale=scale ,data_format=data_format ,**kwargs )
    def normalize( self : str ,image : np.ndarray ,mean : Union[float, List[float]] ,std : Union[float, List[float]] ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        return normalize(image ,mean=mean ,std=std ,data_format=data_format ,**kwargs )
    def preprocess( self : List[str] ,images : ImageInput ,do_resize : Optional[bool] = None ,size : Dict[str, int] = None ,resample : PILImageResampling = None ,do_center_crop : bool = None ,crop_size : Dict[str, int] = None ,do_rescale : Optional[bool] = None ,rescale_factor : Optional[float] = None ,do_normalize : Optional[bool] = None ,image_mean : Optional[Union[float, List[float]]] = None ,image_std : Optional[Union[float, List[float]]] = None ,return_tensors : Optional[Union[str, TensorType]] = None ,data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**kwargs ,):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size ,default_to_square=False )
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size ,param_name='crop_size' )
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image ,size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image ,scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ,mean=image_mean ,std=image_std ) for image in images]
        images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
    def post_process_semantic_segmentation( self : Dict ,outputs : Dict ,target_sizes : List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
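# Small illustration (added): the `shortest_edge` resize rule used by
# `get_resize_output_image_size` above, when not defaulting to a square,
# scales the image so its shorter side hits the target while preserving the
# aspect ratio.
def _shortest_edge_sketch(height , width , shortest_edge ):
    scale = shortest_edge / min(height , width )
    return round(height * scale ), round(width * scale )
assert _shortest_edge_sketch(480 , 640 , 256 ) == (256, 341)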
| 98 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx ) -> Optional[int]:
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx , cnt ) -> Dict:
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx ) -> List[Any]:
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final() -> Optional[int]:
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
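# Toy illustration (added): the helpers above just accumulate
# (new_key, old_key) pairs; the conversion below copies tensors from the
# original checkpoint into an OrderedDict under the new names before calling
# load_state_dict. Defined but not executed on import.
def _rename_sketch():
    from collections import OrderedDict
    rename_pairs = [('''embed.proj.weight''', '''stage0.patch_embed.proj.weight''')]
    original = {'''stage0.patch_embed.proj.weight''': '''<tensor>'''}  # stand-in value
    converted = OrderedDict((new, original[old]) for new, old in rename_pairs )
    assert '''embed.proj.weight''' in converted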
def convert_cvt_checkpoint(cvt_model , image_size , cvt_file_name , pytorch_dump_folder ) -> Optional[int]:
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    repo_id = '''huggingface/label-files'''
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 0 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self) -> Optional[int]:
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model')
        processor = BlipaProcessor(image_processor , tokenizer)
        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer( self , **kwargs) -> int:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).tokenizer
    def get_image_processor( self , **kwargs) -> Union[str, Any]:
        '''simple docstring'''
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs).image_processor
    def tearDown( self) -> Dict:
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs( self) -> int:
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features( self) -> Any:
        '''simple docstring'''
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0)
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor , BlipImageProcessor)
    def test_image_processor( self) -> Tuple:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np')
        input_processor = processor(images=image_input , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
    def test_tokenizer( self) -> Optional[Any]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str , return_token_type_ids=False)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
    def test_processor( self) -> List[str]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
        with pytest.raises(ValueError):
processor()
    def test_tokenizer_decode( self) -> List[str]:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok , decoded_processor)
    def test_model_input_names( self) -> Dict:
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor)
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
| 99 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
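# Quick sanity check (illustrative, not part of the original module): in
# ``initial_grid`` above, 3 already occurs in row 0, so it is unsafe at (0, 1),
# while 1 appears nowhere in that row, column, or 3x3 box.
assert is_safe(initial_grid, 0, 1, 1) is True
assert is_safe(initial_grid, 0, 1, 3) is False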
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 0 | 0 |
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 100 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''', '''negative_prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''generator''',
        '''num_inference_steps''',
        '''latents''',
        '''negative_prompt''',
        '''guidance_scale''',
        '''output_type''',
        '''return_dict''',
    ]
__snake_case = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def block_out_channels_0(self):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim(self):
"""simple docstring"""
return 100
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
@property
    def dummy_image_encoder(self):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
        return model
@property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_prior(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
        expected_slice = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
@skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == '''cpu'''
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 0 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ :Tuple = logging.get_logger(__name__)
lowercase__ :int = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = '''time_series_transformer'''
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    def __init__(self ,prediction_length = None ,context_length = None ,distribution_output = "student_t" ,loss = "nll" ,input_size = 1 ,lags_sequence = [1, 2, 3, 4, 5, 6, 7] ,scaling = "mean" ,num_dynamic_real_features = 0 ,num_static_categorical_features = 0 ,num_static_real_features = 0 ,num_time_features = 0 ,cardinality = None ,embedding_dimension = None ,encoder_ffn_dim = 32 ,decoder_ffn_dim = 32 ,encoder_attention_heads = 2 ,decoder_attention_heads = 2 ,encoder_layers = 2 ,decoder_layers = 2 ,is_encoder_decoder = True ,activation_function = "gelu" ,d_model = 64 ,dropout = 0.1 ,encoder_layerdrop = 0.1 ,decoder_layerdrop = 0.1 ,attention_dropout = 0.1 ,activation_dropout = 0.1 ,num_parallel_samples = 100 ,init_std = 0.02 ,use_cache=True ,**kwargs ,):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    '''The cardinality should be a list of the same length as `num_static_categorical_features`''')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50 ,(cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder ,**kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension)
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
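# Usage sketch (illustrative values, not part of the original module):
# ``context_length`` falls back to ``prediction_length`` when it is not given.
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   assert config.context_length == 24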
| 101 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError('''Parameter number must be int''')

    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''')

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
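# Illustrative check (not in the original file): 145 is a fixed point of the
# digit-factorial map, since 1! + 4! + 5! = 1 + 24 + 120 = 145.
assert digit_factorial_sum(145) == 145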
def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError('''Parameters chain_length and number_limit must be int''')

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''')

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
'''simple docstring'''
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForMaskedLM(config=config )
        inputs_dict = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config )
        inputs_dict = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config )
        inputs_dict = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs_dict )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDebertaVaForQuestionAnswering(config=config )
        inputs_dict = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs_dict )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    all_model_classes = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaVaConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        self.assertIsNotNone(model )
@require_tf
class TFDeBERTaVaModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='''Model not available yet''' )
    def test_inference_masked_lm(self):
        pass
@slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
        input_ids = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
        tf.debugging.assert_near(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 )
| 102 |
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
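# Worked example (illustrative): for n = 10 the square of the sum is 55**2 = 3025
# and the sum of the squares is 385, so the difference is 2640.
assert solution(10) == 2640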
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 0 |
from ....utils import logging
A__ : str = logging.get_logger(__name__)
class MMBTConfig:
    def __init__(self , config , num_labels=None , modal_hidden_size=2_0_4_8):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 103 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
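    # Note (illustrative): with the _LazyModule registration above, importing this
    # package stays cheap. For example, ``from transformers.models.groupvit import
    # GroupViTConfig`` pulls in only configuration_groupvit; the heavy torch/TF
    # modules load on first attribute access.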
| 0 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
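    # Example invocation (sketch; the script name and output path are illustrative):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --dump_path ./karlo-image-variation \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha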
| 104 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = ShapEPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
__snake_case = False
@property
    def text_embedder_hidden_size(self):
"""simple docstring"""
return 32
@property
    def time_input_dim(self):
"""simple docstring"""
return 32
@property
    def time_embed_dim(self):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def renderer_dim(self):
"""simple docstring"""
return 8
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer(self):
        torch.manual_seed(0 )
        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def test_shap_e(self):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.Generator(device=torch_device ).manual_seed(0 )

        images = pipe(
            '''a shark''' , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value: Any = None) -> None:
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"""{self.value}""": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    def __init__(self, root: Node | None = None) -> None:
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left)  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
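# Small illustration (not in the original module): post-order visits the left
# subtree, then the right subtree, then the root, so the tree built from
# 2, 1, 3 yields the values [1, 3, 2].
_demo_tree = BinarySearchTree()
_demo_tree.insert(2, 1, 3)
assert [node.value for node in postorder(_demo_tree.root)] == [1, 3, 2]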
def binary_search_tree_example() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 105 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self , claim_vector: list[int] , allocated_resources_table: list[list[int]] , maximum_claim_table: list[list[int]] , ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self , **kwargs ) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break

    def __pretty_data(self):
        print(''' ''' * 9 + '''Allocated Resource Table''' )
        for item in self.__allocated_resources_table:
            print(
                f"""P{self.__allocated_resources_table.index(item ) + 1}"""
                + ''' '''.join(f"""{it:>8}""" for it in item )
                + '''\n''' )
        print(''' ''' * 9 + '''System Resource Table''' )
        for item in self.__maximum_claim_table:
            print(
                f"""P{self.__maximum_claim_table.index(item ) + 1}"""
                + ''' '''.join(f"""{it:>8}""" for it in item )
                + '''\n''' )
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
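    # Quick demo (illustrative): run the safety algorithm on the sample tables
    # defined at the top of this module; ``describe=True`` also prints them.
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)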
| 0 | 0 |
"""simple docstring"""
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr )]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr )]
        cur -= 1
return arr
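# Worked example (illustrative): each pass flips the current maximum to the
# front, then flips it into place: [3, 1, 2] -> [2, 1, 3] -> [1, 2, 3].
assert pancake_sort([3, 1, 2]) == [1, 2, 3]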
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
| 106 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCAmelCase__ = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
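# Usage sketch (illustrative model id; behavior follows the methods above):
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tok.build_inputs_with_special_tokens([7, 8])           # [CLS] 7 8 [SEP]
#   tok.create_token_type_ids_from_sequences([7, 8], [9])  # [0, 0, 0, 0, 1, 1]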
| 0 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class snake_case__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : str=0.2 , __lowerCamelCase : Union[str, Any]=0.2 ) -> Union[str, Any]:
a = bp_numa
a = bp_numa
a = bp_numa
a = conva_get[:2]
a = conva_get[2]
a = size_pa
a = rate_w
a = rate_t
a = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
a = -2 * np.random.rand(self.conva[1] ) + 1
a = -2 * np.random.rand(self.num_bpa ) + 1
a = -2 * np.random.rand(self.num_bpa ) + 1
def __UpperCAmelCase ( self : Any , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
# save model dict with pickle
a = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(__lowerCamelCase , "wb" ) as f:
pickle.dump(__lowerCamelCase , __lowerCamelCase )
print(f"""Model saved: {save_path}""" )
@classmethod
def __UpperCAmelCase ( cls : int , __lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# read saved model
with open(__lowerCamelCase , "rb" ) as f:
a = pickle.load(__lowerCamelCase ) # noqa: S301
a = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
a = model_dic.get("size_pooling1" )
a = model_dic.get("num_bp1" )
a = model_dic.get("num_bp2" )
a = model_dic.get("num_bp3" )
a = model_dic.get("rate_weight" )
a = model_dic.get("rate_thre" )
# create model instance
a = CNN(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# modify model parameter
a = model_dic.get("w_conv1" )
a = model_dic.get("wkj" )
a = model_dic.get("vji" )
a = model_dic.get("thre_conv1" )
a = model_dic.get("thre_bp2" )
a = model_dic.get("thre_bp3" )
return conv_ins
    def sig(self , x ):
        return 1 / (1 + np.exp(-1 * x ))

    def do_round(self , x ):
        return round(x , 3 )
def __UpperCAmelCase ( self : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : int ) -> Any:
# convolution process
a = convs[0]
a = convs[1]
a = np.shape(__lowerCamelCase )[0]
# get the data slice of original image data, data_focus
a = []
for i_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase ):
for j_focus in range(0 , size_data - size_conv + 1 , __lowerCamelCase ):
a = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowerCamelCase )
# calculate the feature map of every single kernel, and saved as list of matrix
a = []
a = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__lowerCamelCase ):
a = []
for i_focus in range(len(__lowerCamelCase ) ):
a = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowerCamelCase ) )
a = np.asmatrix(__lowerCamelCase ).reshape(
__lowerCamelCase , __lowerCamelCase )
data_featuremap.append(__lowerCamelCase )
# expanding the data slice to One dimenssion
a = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowerCamelCase ) )
a = np.asarray(__lowerCamelCase )
return focus_list, data_featuremap
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : List[str]="average_pool" ) -> Tuple:
# pooling process
a = len(featuremaps[0] )
a = int(size_map / size_pooling )
a = []
for i_map in range(len(__lowerCamelCase ) ):
a = featuremaps[i_map]
a = []
for i_focus in range(0 , __lowerCamelCase , __lowerCamelCase ):
for j_focus in range(0 , __lowerCamelCase , __lowerCamelCase ):
a = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowerCamelCase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowerCamelCase ) )
a = np.asmatrix(__lowerCamelCase ).reshape(__lowerCamelCase , __lowerCamelCase )
featuremap_pooled.append(__lowerCamelCase )
return featuremap_pooled
def __UpperCAmelCase ( self : Optional[int] , __lowerCamelCase : Any ) -> Any:
# expanding three dimension data to one dimension list
a = []
for i in range(len(__lowerCamelCase ) ):
a = np.shape(data[i] )
a = data[i].reshape(1 , shapes[0] * shapes[1] )
a = data_listed.getA().tolist()[0]
data_expanded.extend(__lowerCamelCase )
a = np.asarray(__lowerCamelCase )
return data_expanded
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> Tuple:
# expanding matrix to one dimension list
a = np.asarray(__lowerCamelCase )
a = np.shape(__lowerCamelCase )
a = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int ) -> str:
a = []
a = 0
for i_map in range(__lowerCamelCase ):
a = np.ones((size_map, size_map) )
for i in range(0 , __lowerCamelCase , __lowerCamelCase ):
for j in range(0 , __lowerCamelCase , __lowerCamelCase ):
a = pd_pool[
i_pool
]
a = i_pool + 1
a = np.multiply(
__lowerCamelCase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__lowerCamelCase )
return pd_all
def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : Dict=bool ) -> str:
# model traning
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(__lowerCamelCase )) )
print((" - - Shape: Teach_Data ", np.shape(__lowerCamelCase )) )
a = 0
a = []
a = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
a = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(__lowerCamelCase ) ):
# print('------------Learning Image: %d--------------'%p)
a = np.asmatrix(datas_train[p] )
a = np.asarray(datas_teach[p] )
a , a = self.convolute(
__lowerCamelCase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a = self.pooling(__lowerCamelCase , self.size_poolinga )
a = np.shape(__lowerCamelCase )
a = self._expand(__lowerCamelCase )
a = data_bp_input
a = np.dot(__lowerCamelCase , self.vji.T ) - self.thre_bpa
a = self.sig(__lowerCamelCase )
a = np.dot(__lowerCamelCase , self.wkj.T ) - self.thre_bpa
a = self.sig(__lowerCamelCase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
a = np.multiply(
(data_teach - bp_outa) , np.multiply(__lowerCamelCase , (1 - bp_outa) ) )
a = np.multiply(
np.dot(__lowerCamelCase , self.wkj ) , np.multiply(__lowerCamelCase , (1 - bp_outa) ) )
a = np.dot(__lowerCamelCase , self.vji )
a = pd_i_all / (self.size_poolinga * self.size_poolinga)
a = pd_conva_pooled.T.getA().tolist()
a = self._calculate_gradient_from_pool(
__lowerCamelCase , __lowerCamelCase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
a = self._expand_mat(pd_conva_all[k_conv] )
a = self.rate_weight * np.dot(__lowerCamelCase , __lowerCamelCase )
a = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
a = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
a = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a = self.thre_bpa - pd_k_all * self.rate_thre
a = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
a = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a = rp + 1
a = error_count / patterns
all_mse.append(__lowerCamelCase )
def draw_error():
a = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__lowerCamelCase , "+-" )
plt.plot(__lowerCamelCase , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(__lowerCamelCase , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focusa, data_conveda = self.convolute(
                data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
            data_pooleda = self.pooling(data_conveda, self.size_poolinga)
            data_bp_input = self._expand(data_pooleda)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bpa
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bpa
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)
    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focusa, data_conveda = self.convolute(
            data_test, self.conva, self.w_conva, self.thre_conva, conv_step=self.step_conva, )
        data_pooleda = self.pooling(data_conveda, self.size_poolinga)
        return data_conveda, data_pooleda
if __name__ == "__main__":
pass
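# The gradient lines above exploit sigmoid'(x) = sig(x) * (1 - sig(x)), which
# is why every backprop delta multiplies by out * (1 - out). A standalone
# sketch of that delta rule for one sigmoid output layer (names here are
# illustrative, not part of the class above):
import numpy as np


def _sigmoid(x):
    return 1 / (1 + np.exp(-x))


def _output_delta(target, out):
    # delta = (target - output) * sigmoid'(net), with sigmoid' written via out
    return np.multiply(target - out, np.multiply(out, 1 - out))


if __name__ == "__main__":
    out = _sigmoid(np.array([0.2, -1.0, 3.0]))
    print(_output_delta(np.array([1.0, 0.0, 1.0]), out))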
| 107 |
def perfect_cube(n: int) -> bool:
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
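# For very large integers, `n ** (1 / 3)` loses precision in float arithmetic.
# A float-free alternative (a sketch, not part of the original) finds the
# integer cube root by binary search:
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        n = -n  # (-x)**3 == -(x**3), so the sign does not matter
    lo, hi = 0, n
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False


print(perfect_cube_binary_search(27))  # True
print(perfect_cube_binary_search(10 ** 18))  # True: (10**6) ** 3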
| 0 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
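# A simplified sketch of what a converter like `_convert_nargs_to_dict` might
# do (illustrative only; the real accelerate implementation may differ): pair
# `--flag value` tokens and cast values to bool/int/float when possible.
def _convert_nargs_to_dict_sketch(nargs):
    result = {}
    for flag, raw in zip(nargs[::2], nargs[1::2]):
        if not flag.startswith("--") or raw.startswith("--"):
            raise ValueError(f"malformed flag/value pair: {flag!r} {raw!r}")
        key = flag[2:]
        if raw in ("True", "False"):
            result[key] = raw == "True"
        else:
            try:
                result[key] = int(raw)
            except ValueError:
                try:
                    result[key] = float(raw)
                except ValueError:
                    result[key] = raw
    return result


# e.g. _convert_nargs_to_dict_sketch(["--epochs", "3", "--learning_rate", "5e-5"])
# -> {"epochs": 3, "learning_rate": 5e-05}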
| 108 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
a = dset.map(
lambda __UpperCAmelCase , __UpperCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase )
a = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
a , a = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(__UpperCAmelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
from elasticsearch import Elasticsearch
a = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
a = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=__UpperCAmelCase )
a , a = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
a = np.eye(5 , dtype=np.floataa )[::-1]
a , a = index.search_batch(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search_batch , queries[0] )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
import faiss
a = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
a = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__UpperCAmelCase ):
a = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = faiss.IndexFlat(5 )
a = FaissIndex(custom_index=__UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
a = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _a ( a :Dict ) -> Any:
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
a = '''index.faiss'''
a = F"""mock://{index_name}"""
index.save(a , storage_options=mockfs.storage_options )
a = FaissIndex.load(a , storage_options=mockfs.storage_options )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(a )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = Elasticsearch()
a = {'''acknowledged''': True}
a = ElasticSearchIndex(es_client=__UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
# batched queries with timeout
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase , request_timeout=30 )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
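# For reference, the core FAISS pattern exercised above, outside of `datasets`
# (assumes a local `faiss-cpu` install; with an inner-product index, vectors
# should be L2-normalized if cosine similarity is intended):
if __name__ == "__main__":
    import faiss
    import numpy as np

    index = faiss.IndexFlatIP(5)  # flat inner-product index over 5-d vectors
    index.add(np.eye(5, dtype=np.float32))  # five one-hot "documents"
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1.0
    scores, indices = index.search(query, 1)  # top-1 neighbor
    print(scores[0][0], indices[0][0])  # 1.0 1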
| 0 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
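# A minimal sketch of the lazy-import idea behind `_LazyModule` (illustrative,
# not the actual transformers implementation): attribute access imports the
# owning submodule on first use and caches the result.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value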
| 109 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32_128, d_model=512, d_kv=64, d_ff=2_048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
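# Quick check of the `feed_forward_proj` parsing above (illustrative): a
# "gated-" prefix splits into a gating flag plus the activation name, with a
# backwards-compatibility remap for "gated-gelu".
if __name__ == "__main__":
    config = T5Config(feed_forward_proj="gated-gelu")
    print(config.is_gated_act)  # True
    print(config.dense_act_fn)  # "gelu_new"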
| 0 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 110 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32_000, hidden_size=4_096, intermediate_size=11_008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2_048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
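# Illustration of the validation above: a malformed `rope_scaling` dict fails
# fast, while a well-formed one is stored as-is.
if __name__ == "__main__":
    try:
        LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
    except ValueError as err:
        print(err)  # name field must be one of ['linear', 'dynamic']
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)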
| 0 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
_A : str =logging.get_logger(__name__)
_A : List[str] ={
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''',
}
class _lowercase ( _lowercase ):
a = """t5"""
a = ["""past_key_values"""]
a = {"""hidden_size""": """d_model""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""}
def __init__( self: Optional[Any] , UpperCamelCase__: Optional[Any]=32_128 , UpperCamelCase__: List[Any]=512 , UpperCamelCase__: Dict=64 , UpperCamelCase__: Tuple=2_048 , UpperCamelCase__: int=6 , UpperCamelCase__: Optional[int]=None , UpperCamelCase__: Optional[int]=8 , UpperCamelCase__: str=32 , UpperCamelCase__: Tuple=128 , UpperCamelCase__: Optional[Any]=0.1 , UpperCamelCase__: int=1e-6 , UpperCamelCase__: int=1.0 , UpperCamelCase__: List[str]="relu" , UpperCamelCase__: int=True , UpperCamelCase__: int=True , UpperCamelCase__: List[Any]=0 , UpperCamelCase__: int=1 , **UpperCamelCase__: str , ):
lowerCamelCase__ : Tuple = vocab_size
lowerCamelCase__ : Union[str, Any] = d_model
lowerCamelCase__ : Any = d_kv
lowerCamelCase__ : List[Any] = d_ff
lowerCamelCase__ : Optional[int] = num_layers
lowerCamelCase__ : Dict = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCamelCase__ : str = num_heads
lowerCamelCase__ : int = relative_attention_num_buckets
lowerCamelCase__ : Tuple = relative_attention_max_distance
lowerCamelCase__ : Any = dropout_rate
lowerCamelCase__ : Optional[Any] = layer_norm_epsilon
lowerCamelCase__ : List[str] = initializer_factor
lowerCamelCase__ : Tuple = feed_forward_proj
lowerCamelCase__ : List[Any] = use_cache
lowerCamelCase__ : Tuple = self.feed_forward_proj.split("""-""" )
lowerCamelCase__ : List[str] = act_info[-1]
lowerCamelCase__ : List[str] = act_info[0] == """gated"""
if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""\'gated-gelu\' or \'relu\'""" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCamelCase__ : Optional[Any] = """gelu_new"""
super().__init__(
pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , )
class _lowercase ( _lowercase ):
@property
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Optional[int] = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
lowerCamelCase__ : str = """past_encoder_sequence + sequence"""
lowerCamelCase__ : List[Any] = {0: """batch"""}
lowerCamelCase__ : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCamelCase__ : Tuple = {0: """batch""", 1: """decoder_sequence"""}
lowerCamelCase__ : Tuple = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="""inputs""" )
return common_inputs
@property
def lowerCamelCase_ ( self: Union[str, Any] ):
return 13
| 41 |
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "contact@muhammadumerfarooq.me"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
UpperCAmelCase__ = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ] )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220, ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
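# For orientation (values from the config defaults above): 80 mel bins over
# ~30 s of audio at 100 frames/s give encoder inputs of shape
# (batch, num_mel_bins, 3000); the stride-2 conv front end halves the time
# axis to max_source_positions = 1500.
if __name__ == "__main__":
    import numpy as np

    num_mel_bins, max_source_positions = 80, 1500
    input_features = np.zeros((1, num_mel_bins, max_source_positions * 2), dtype=np.float32)
    print(input_features.shape)  # (1, 80, 3000)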
| 84 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase_ ( lowercase ):
'''simple docstring'''
    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = 7 if get_gpu_count() > 1 else 2
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''translation_no_trainer''' ) ) )
@slow
def __lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
a = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCAmelCase )
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''image_classification_no_trainer''' ) ) )
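# The tests above shell out through `run_command` from
# `transformers.testing_utils`. A minimal stand-in in the same spirit (a
# sketch, not the real helper) raises on any non-zero exit code:
import subprocess


def run_command_sketch(command, env=None):
    completed = subprocess.run(command, env=env, capture_output=True, text=True)
    if completed.returncode != 0:
        raise RuntimeError(f"command {' '.join(command)} failed:\n{completed.stderr}")
    return completed.stdout


# e.g. run_command_sketch(["python", "--version"])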
| 0 | 0 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, image=None, batch_size=1, num_inference_steps=100, eta=0.0, generator=None, output_type="pil", return_dict=True):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 163 |
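# Shape/range sanity check for the `preprocess` helper defined in the pipeline
# above: each side is snapped down to a multiple of 32 and pixel values are
# mapped from [0, 255] to [-1, 1].
if __name__ == "__main__":
    import numpy as np
    from PIL import Image

    img = Image.fromarray(np.zeros((70, 45, 3), dtype=np.uint8))
    tensor = preprocess(img)
    print(tuple(tensor.shape))  # (1, 3, 64, 32)
    print(float(tensor.min()), float(tensor.max()))  # -1.0 -1.0 for an all-black input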
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
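# Quick sanity check of `prime_sieve` on a small bound (illustrative):
if __name__ == "__main__":
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]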
| 0 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='linear_ramp', pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min, max):
    return (
        clamp(rect[0], min[0], max[0]),
        clamp(rect[1], min[1], max[1]),
        clamp(rect[2], min[0], max[0]),
        clamp(rect[3], min[1], max[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('RGB', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    # round n down to the nearest multiple of d
    divisor = n % d
    return n - divisor
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 350 , ) -> int:
'''simple docstring'''
super().__init__(
vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , max_noise_level=__UpperCAmelCase , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_lowercase =(
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
_lowercase =add_overlap_rect(__UpperCAmelCase , __UpperCAmelCase , image.size )
_lowercase =image.crop(__UpperCAmelCase )
_lowercase =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
_lowercase =translated_slice_x - (original_image_slice / 2)
_lowercase =max(0 , __UpperCAmelCase )
_lowercase =squeeze_tile(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
_lowercase =to_input.size
_lowercase =to_input.resize((tile_size, tile_size) , Image.BICUBIC )
_lowercase =super(__UpperCAmelCase , self ).__call__(image=__UpperCAmelCase , **__UpperCAmelCase ).images[0]
_lowercase =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
_lowercase =unsqueeze_tile(__UpperCAmelCase , __UpperCAmelCase )
_lowercase =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
_lowercase =[]
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
_lowercase =Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__UpperCAmelCase ) , mode='L' , )
final_image.paste(
__UpperCAmelCase , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __UpperCAmelCase )
@torch.no_grad()
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 75 , lowerCAmelCase = 9.0 , lowerCAmelCase = 50 , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 128 , lowerCAmelCase = 32 , lowerCAmelCase = 32 , ) -> Optional[int]:
'''simple docstring'''
_lowercase =Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
_lowercase =math.ceil(image.size[0] / tile_size )
_lowercase =math.ceil(image.size[1] / tile_size )
_lowercase =tcx * tcy
_lowercase =0
for y in range(__UpperCAmelCase ):
for x in range(__UpperCAmelCase ):
self._process_tile(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , prompt=__UpperCAmelCase , num_inference_steps=__UpperCAmelCase , guidance_scale=__UpperCAmelCase , noise_level=__UpperCAmelCase , negative_prompt=__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , latents=__UpperCAmelCase , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def main():
    # Run a demonstration of the tiled upscaler
    model_id = 'stabilityai/stable-diffusion-x4-upscaler'
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='fp16', torch_dtype=torch.floataa)
    pipe = pipe.to('cuda')
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg')

    def callback(obj):
        print(F'''progress: {obj["progress"]:.4f}''')
        obj["image"].save('diffusers_library_progress.jpg')

    final_image = pipe(image=image, prompt='Black font, white background, vector', noise_level=40, callback=callback)
    final_image.save('diffusers_library.jpg')
if __name__ == "__main__":
main()
| 205 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 0 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__snake_case = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__snake_case = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
__snake_case = [file for file in filepaths if ''' ''' in file]
if space_files:
print(f'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
__snake_case = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(f'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
__snake_case = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
__snake_case = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
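# --- Illustrative aside (added) ---
# The naming checks above can be exercised on a synthetic file list; the sample
# paths are assumptions and use "/" instead of os.sep for brevity.
_sample = ["maths/gcd.py", "Maths/lcm.py", "graphs/a star.py", "readme.md"]
assert [f for f in _sample if f != f.lower()] == ["Maths/lcm.py"]  # uppercase letters
assert [f for f in _sample if " " in f] == ["graphs/a star.py"]    # space characters
assert [f for f in _sample if "/" not in f] == ["readme.md"]       # no directory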
| 348 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
a = SMALL_MODEL_IDENTIFIER
a = '''pt'''
a = '''tf'''
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str ) ->Union[str, Any]:
"""simple docstring"""
a = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] ) ->List[str]:
"""simple docstring"""
a = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase )
model_tf.save_pretrained(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = '''mock_framework'''
# Framework provided - return whatever the user provides
a = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__UpperCAmelCase ):
a = FeaturesManager.determine_framework(__UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
a = MagicMock(return_value=__UpperCAmelCase )
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# Both not in environment -> raise error
a = MagicMock(return_value=__UpperCAmelCase )
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
with self.assertRaises(__UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
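# --- Hedged usage sketch (added; left commented because it needs `transformers`
# and, for hub lookups, network access) ---
# What the tests above exercise: determine_framework picks the framework from an
# explicit argument, a local checkpoint's files, or the installed backends. The
# model id below is an assumption chosen for illustration.
#
# from transformers.onnx import FeaturesManager
# framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
# print(framework)  # "pt" when torch is installed, otherwise "tf"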
| 0 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : List[str] = logging.get_logger(__name__)
__snake_case : Union[str, Any] = {
'naver-clova-ix/donut-base': 'https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'donut-swin'
__snake_case = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , lowerCAmelCase_ : Optional[Any]=2_24 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Any=96 , lowerCAmelCase_ : Optional[int]=[2, 2, 6, 2] , lowerCAmelCase_ : List[Any]=[3, 6, 12, 24] , lowerCAmelCase_ : Optional[Any]=7 , lowerCAmelCase_ : List[Any]=4.0 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Union[str, Any]="gelu" , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : int=1e-5 , **lowerCAmelCase_ : Tuple , ) -> int:
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
A__ : str =image_size
A__ : Dict =patch_size
A__ : Dict =num_channels
A__ : List[Any] =embed_dim
A__ : Union[str, Any] =depths
A__ : int =len(__UpperCAmelCase )
A__ : Any =num_heads
A__ : Optional[int] =window_size
A__ : str =mlp_ratio
A__ : int =qkv_bias
A__ : List[str] =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : Optional[Any] =drop_path_rate
A__ : Optional[int] =hidden_act
A__ : List[str] =use_absolute_embeddings
A__ : int =layer_norm_eps
A__ : Union[str, Any] =initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
A__ : Union[str, Any] =int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
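# --- Hedged usage sketch (added) ---
# Upstream this class is DonutSwinConfig. With the defaults above, the derived
# channel dimension is embed_dim * 2 ** (num_layers - 1) = 96 * 2 ** 3 = 768.
# The lines below assume the upstream class name and are left commented because
# they require `transformers` to be installed.
#
# from transformers import DonutSwinConfig
# config = DonutSwinConfig()
# assert config.hidden_size == 768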
| 134 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = ProphetNetTokenizer
__snake_case = False
def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
super().setUp()
a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str ) ->Dict:
"""simple docstring"""
a = '''UNwant\u00E9d,running'''
a = '''unwanted, running'''
return input_text, output_text
def __lowerCAmelCase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__UpperCAmelCase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def __lowerCAmelCase ( self : int ) ->Any:
"""simple docstring"""
a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __lowerCAmelCase ( self : Dict ) ->str:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : int ) ->Optional[int]:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , strip_accents=__UpperCAmelCase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = BasicTokenizer(do_lower_case=__UpperCAmelCase , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
a = {}
for i, token in enumerate(__UpperCAmelCase ):
a = i
a = WordpieceTokenizer(vocab=__UpperCAmelCase , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def __lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
a = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
a = tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors='''pt''' )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
a = list(batch.input_ids.numpy()[0] )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __lowerCAmelCase ( self : Any ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def __lowerCAmelCase ( self : List[str] ) ->List[str]:
"""simple docstring"""
a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
a = tokenizer.encode('''sequence builders''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase )
a = tokenizer.build_inputs_with_special_tokens(__UpperCAmelCase , __UpperCAmelCase )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
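# --- Illustrative aside (added) ---
# A minimal pure-Python sketch of the greedy longest-match WordPiece lookup that
# the tests above exercise; `vocab` below is a toy assumption, not the real
# tokenizer's vocabulary handling.
def _toy_wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:  # no piece matched: the whole word becomes [UNK]
            return [unk]
        pieces.append(cur)
        start = end
    return pieces


assert _toy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert _toy_wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]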
| 0 | 0 |
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
__UpperCAmelCase = get_logger(__name__)
__UpperCAmelCase = Path(__file__).parent / 'model_card_template.md'
__UpperCAmelCase = uuida().hex
__UpperCAmelCase = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
__UpperCAmelCase = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
__UpperCAmelCase = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def lowercase__ ( __snake_case : Union[Dict, str, None] = None ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = F"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"; torch/{_torch_version}"
if is_flax_available():
ua += F"; jax/{_jax_version}"
ua += F"; flax/{_flax_version}"
if is_onnx_available():
ua += F"; onnxruntime/{_onnxruntime_version}"
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(__snake_case , __snake_case ):
ua += "; " + "; ".join(F"{k}/{v}" for k, v in user_agent.items() )
elif isinstance(__snake_case , __snake_case ):
ua += "; " + user_agent
return ua
def get_full_repo_name( model_id: str , organization: Optional[str] = None , token: Optional[str] = None ):
    '''simple docstring'''
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token )['name']
        return F"{username}/{model_id}"
    else:
        return F"{organization}/{model_id}"
def lowercase__ ( __snake_case : List[Any] , __snake_case : Tuple ):
'''simple docstring'''
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(__snake_case , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
UpperCAmelCase_ : Any = args.hub_token if hasattr(__snake_case , 'hub_token' ) else None
UpperCAmelCase_ : Any = get_full_repo_name(__snake_case , token=__snake_case )
UpperCAmelCase_ : List[Any] = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__snake_case , model_name=__snake_case , repo_name=__snake_case , dataset_name=args.dataset_name if hasattr(__snake_case , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(__snake_case , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(__snake_case , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(__snake_case , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__snake_case , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__snake_case , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__snake_case , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__snake_case , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__snake_case , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(__snake_case , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__snake_case , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
UpperCAmelCase_ : Any = os.path.join(args.output_dir , 'README.md' )
model_card.save(__snake_case )
def extract_commit_hash( resolved_file: Optional[str] , commit_hash: Optional[str] = None ):
    '''simple docstring'''
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file ).as_posix() )
    search = re.search(R'snapshots/([^/]+)/' , resolved_file )
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash ) else None
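# --- Illustrative aside (added) ---
# A worked example of the snapshots-path pattern used above; the sample path is
# an assumption shaped like a hub cache entry.
_demo_path = "models--org--repo/snapshots/0123abcd/unet/config.json"
assert re.search(R"snapshots/([^/]+)/", _demo_path).groups()[0] == "0123abcd"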
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
__UpperCAmelCase = os.path.expanduser(
os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
__UpperCAmelCase = os.path.join(hf_cache_home, 'diffusers')
def move_cache( old_cache_dir: Optional[str] = None , new_cache_dir: Optional[str] = None ):
    '''simple docstring'''
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir ).expanduser()
    new_cache_dir = Path(new_cache_dir ).expanduser()
    for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir )
            new_blob_path.parent.mkdir(parents=True , exist_ok=True )
            os.replace(old_blob_path , new_blob_path )
            try:
                os.symlink(new_blob_path , old_blob_path )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
__UpperCAmelCase = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
__UpperCAmelCase = 0
else:
with open(cache_version_file) as f:
try:
__UpperCAmelCase = int(f.read())
except ValueError:
__UpperCAmelCase = 0
if cache_version < 1:
__UpperCAmelCase = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
__UpperCAmelCase = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
'the directory exists and can be written to.'
)
def _add_variant( weights_name: str , variant: Optional[str] = None ):
    '''simple docstring'''
    if variant is not None:
        splits = weights_name.split('.' )
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = '.'.join(splits )
    return weights_name
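# --- Usage sketch (added) ---
# The variant is spliced in just before the final extension of the weights name.
assert _add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant("model.safetensors") == "model.safetensors"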
def lowercase__ ( __snake_case : int , *,
__snake_case : Dict , __snake_case : Any , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : str , __snake_case : Dict , __snake_case : str , __snake_case : str , __snake_case : List[str]=None , ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = str(__snake_case )
if os.path.isfile(__snake_case ):
return pretrained_model_name_or_path
elif os.path.isdir(__snake_case ):
if os.path.isfile(os.path.join(__snake_case , __snake_case ) ):
# Load from a PyTorch checkpoint
UpperCAmelCase_ : str = os.path.join(__snake_case , __snake_case )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__snake_case , __snake_case , __snake_case ) ):
UpperCAmelCase_ : List[Any] = os.path.join(__snake_case , __snake_case , __snake_case )
return model_file
else:
raise EnvironmentError(
F"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}." )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__snake_case ).base_version ) >= version.parse('0.20.0' )
):
try:
UpperCAmelCase_ : Union[str, Any] = hf_hub_download(
__snake_case , filename=_add_variant(__snake_case , __snake_case ) , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , user_agent=__snake_case , subfolder=__snake_case , revision=revision or commit_hash , )
warnings.warn(
F"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead." , __snake_case , )
return model_file
except: # noqa: E722
warnings.warn(
F"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__snake_case , __snake_case )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__snake_case , __snake_case )}' so that the correct variant file can be added." , __snake_case , )
try:
# 2. Load model file as usual
UpperCAmelCase_ : Dict = hf_hub_download(
__snake_case , filename=__snake_case , cache_dir=__snake_case , force_download=__snake_case , proxies=__snake_case , resume_download=__snake_case , local_files_only=__snake_case , use_auth_token=__snake_case , user_agent=__snake_case , subfolder=__snake_case , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'this model name. Check the model page at '
F"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
F"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
F"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
F"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
F" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
F" directory containing a file named {weights_name} or"
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
F"containing a file named {weights_name}" )
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
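# --- Hedged aside (added) ---
# A minimal sketch of the lazy-import idea used above: attribute access triggers
# the real submodule import on first use. This toy class is an assumption, not
# the actual _LazyModule implementation.
import importlib


class _ToyLazyModule:
    def __init__(self, package: str, import_structure: dict):
        self._package = package
        # map each public name to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule = importlib.import_module("." + self._attr_to_module[attr], self._package)
        return getattr(submodule, attr)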
| 0 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str=3 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : str=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : str=False , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=9_9 , __UpperCamelCase : List[str]=3_2 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Optional[int]=3_7 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Dict=5_1_2 , __UpperCamelCase : Union[str, Any]=1_6 , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=4 , __UpperCamelCase : List[Any]=None , )->List[str]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowercase__ ( self : List[str] )->Union[str, Any]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Any )->Optional[int]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__UpperCAmelCase , )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int )->List[str]:
_UpperCAmelCase = FalconModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
_UpperCAmelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , )->str:
_UpperCAmelCase = True
_UpperCAmelCase = FalconModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[str] , )->Optional[Any]:
_UpperCAmelCase = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , )->str:
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = FalconForCausalLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
# first forward pass
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , )
_UpperCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
_UpperCAmelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )['''hidden_states'''][0]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def lowercase__ ( self : Any )->Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (FalconForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
_UpperCAmelCase = FalconModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 )
def lowercase__ ( self : str )->Union[str, Any]:
self.config_tester.run_common_tests()
def lowercase__ ( self : List[str] )->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] )->Any:
_UpperCAmelCase , *_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
_UpperCAmelCase = alibi
self.model_tester.create_and_check_model(__UpperCAmelCase , *__UpperCAmelCase )
def lowercase__ ( self : Dict )->int:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase )
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : str )->Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = '''single_label_classification'''
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase )
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_UpperCAmelCase = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = FalconForCausalLM(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , use_cache=__UpperCAmelCase )
_UpperCAmelCase = input_ids.shape[0]
_UpperCAmelCase = model._convert_to_rw_cache(result.past_key_values )
_UpperCAmelCase = model._convert_cache_to_standard_format(__UpperCAmelCase , __UpperCAmelCase )
for layer in range(len(__UpperCAmelCase ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def lowercase__ ( self : List[str] )->str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = '''multi_label_classification'''
_UpperCAmelCase = input_dict['''input_ids''']
_UpperCAmelCase = input_ids.ne(1 ).to(__UpperCAmelCase )
_UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_UpperCAmelCase = FalconForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_UpperCAmelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def lowercase__ ( self : List[str] )->int:
for model_class in self.all_generative_model_classes:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(__UpperCAmelCase , '''use_cache''' ):
return
_UpperCAmelCase = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
if "use_cache" not in inputs:
_UpperCAmelCase = True
_UpperCAmelCase = model(**__UpperCAmelCase )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
_UpperCAmelCase = (
getattr(__UpperCAmelCase , '''decoder_layers''' , __UpperCAmelCase )
or getattr(__UpperCAmelCase , '''num_decoder_layers''' , __UpperCAmelCase )
or config.num_hidden_layers
)
_UpperCAmelCase = getattr(__UpperCAmelCase , '''num_kv_heads''' , config.num_attention_heads )
_UpperCAmelCase = getattr(__UpperCAmelCase , '''d_model''' , config.hidden_size )
_UpperCAmelCase = embed_dim // num_attention_heads
_UpperCAmelCase = outputs['''past_key_values''']
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
_UpperCAmelCase , _UpperCAmelCase = inputs['''input_ids'''].shape
for i in range(__UpperCAmelCase ):
if config.new_decoder_architecture:
_UpperCAmelCase = config.num_attention_heads
elif config.multi_query:
_UpperCAmelCase = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class _a ( unittest.TestCase):
"""simple docstring"""
@slow
def lowercase__ ( self : Dict )->Tuple:
_UpperCAmelCase = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
_UpperCAmelCase = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' )
model.eval()
model.to(__UpperCAmelCase )
_UpperCAmelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
_UpperCAmelCase = (
'''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'''
)
_UpperCAmelCase = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=1_9 )
_UpperCAmelCase = tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowercase__ ( self : Union[str, Any] )->List[str]:
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_UpperCAmelCase = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(__UpperCAmelCase )
_UpperCAmelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=4 )
model.generate(**__UpperCAmelCase , num_beams=2 , max_new_tokens=4 )
@slow
def lowercase__ ( self : Tuple )->Dict:
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_UpperCAmelCase = FalconForCausalLM.from_pretrained(__UpperCAmelCase )
model.eval()
model.to(device=__UpperCAmelCase )
_UpperCAmelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(__UpperCAmelCase )
# Test results are the same with and without cache
_UpperCAmelCase = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=2_0 , use_cache=__UpperCAmelCase )
_UpperCAmelCase = model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=2_0 , use_cache=__UpperCAmelCase )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
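# --- Hedged aside (added) ---
# The cache-shape invariant the tests above check, in isolation: each decoder
# layer caches a (key, value) pair shaped (batch, kv_heads, seq_len, head_dim).
# The sizes below are toy assumptions.
if is_torch_available():
    _b, _h, _s, _d = 2, 4, 7, 16
    _demo_layer_cache = (torch.zeros(_b, _h, _s, _d), torch.zeros(_b, _h, _s, _d))
    assert len(_demo_layer_cache) == 2  # K and V
    assert _demo_layer_cache[0].shape == (_b, _h, _s, _d)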
| 260 |
def gray_code(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''')

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
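# --- Usage sketch (added) ---
# Consecutive reflected-Gray-code values differ in exactly one bit.
assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code(3) == [0, 1, 3, 2, 6, 7, 5, 4]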
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Returns the next number of the chain by summing the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.

# The other one ends with 1 and has only one element 1.

# So 58 and 1 are chosen to be declared at the starting.

# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[57] = True   # 58 leads to the chain ending in 89
CHAINS[0] = False   # 1 leads to the chain ending in 1


def chain(number: int) -> bool:
    """Returns True if the chain of `number` ends in 89 and False if it ends in 1."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Returns the count of starting numbers below `number` whose chain ends in 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(True)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
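# --- Usage sketch (added) ---
# 44 -> 4^2 + 4^2 = 32, the first step of a chain that ends at 1
# (44 -> 32 -> 13 -> 10 -> 1).
assert next_number(44) == 32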
| 305 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[Any] ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
# fmt: off
a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
a = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
a = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , **__UpperCAmelCase : List[Any] ) ->int:
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] , **__UpperCAmelCase : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : str ) ->Dict:
"""simple docstring"""
a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
a = self.get_tokenizer()
a = self.get_image_processor()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
a = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple ) ->Dict:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = VisionTextDualEncoderProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
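# --- Hedged aside (added) ---
# The pattern the tests above exercise, as a toy: one processor object routes
# text to the tokenizer and images to the image processor and merges the
# outputs. This sketch is an assumption, not the real implementation.
class _ToyDualProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text, **kwargs))
        if images is not None:
            outputs.update(self.image_processor(images, **kwargs))
        return outputs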
| 0 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = '▁'
UpperCAmelCase_ : List[Any] = {'vocab_file': 'sentencepiece.bpe.model'}
UpperCAmelCase_ : List[str] = {
'vocab_file': {
'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
'xlm-roberta-large-finetuned-conll02-dutch': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll02-spanish': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-english': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
),
'xlm-roberta-large-finetuned-conll03-german': (
'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
),
}
}
UpperCAmelCase_ : int = {
'xlm-roberta-base': 512,
'xlm-roberta-large': 512,
'xlm-roberta-large-finetuned-conll02-dutch': 512,
'xlm-roberta-large-finetuned-conll02-spanish': 512,
'xlm-roberta-large-finetuned-conll03-english': 512,
'xlm-roberta-large-finetuned-conll03-german': 512,
}
class SCREAMING_SNAKE_CASE__ ( lowercase__ ):
snake_case__ : Union[str, Any] = VOCAB_FILES_NAMES
snake_case__ : Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ : str = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[Any]="<s>" , SCREAMING_SNAKE_CASE__ : Optional[int]="</s>" , SCREAMING_SNAKE_CASE__ : str="</s>" , SCREAMING_SNAKE_CASE__ : int="<s>" , SCREAMING_SNAKE_CASE__ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE__ : Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE__ : str="<mask>" , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE__ : Any , ) -> None:
a_ : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
a_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
a_ : Optional[int] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
a_ : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
a_ : List[str] = 1
a_ : Any = len(self.sp_model ) + self.fairseq_offset
a_ : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[str] ) -> Optional[int]:
a_ : Any = self.__dict__.copy()
a_ : int = None
a_ : List[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
a_ : int = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
a_ : Tuple = {}
a_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a_ : Optional[Any] = [self.cls_token_id]
a_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ) -> List[int]:
a_ : int = [self.sep_token_id]
a_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
        a_ : Tuple = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : Tuple = self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE ( self : Dict , SCREAMING_SNAKE_CASE__ : str ) -> List[str]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
        a_ : str = ''.join(__UpperCAmelCase ).replace('▁' , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
a_ : Any = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
a_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
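# Illustrative usage sketch (assumes a trained SentencePiece model at the
# given path; the class name below is the obfuscated identifier used above):
#   tok = SCREAMING_SNAKE_CASE__('sentencepiece.bpe.model')
#   pieces = tok.sp_model.encode('Hello world', out_type=str)    # e.g. ['▁Hello', '▁world']
#   ids = [tok.fairseq_offset + tok.sp_model.PieceToId(p) for p in pieces]
#   with_specials = tok.build_inputs_with_special_tokens(ids)    # <s> ... </s>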
| 32 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def _a ( a :List[Any] ) -> Optional[int]:
a = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def _a ( a :List[Any] , a :Optional[int] ) -> Dict:
a = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def _a ( a :Any ) -> List[Any]:
a = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def _a ( ) -> Optional[int]:
a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def _a ( a :Tuple , a :Optional[int] , a :List[Any] , a :Union[str, Any] ) -> Optional[int]:
a = '''imagenet-1k-id2label.json'''
a = 1_000
a = '''huggingface/label-files'''
a = num_labels
a = json.load(open(cached_download(hf_hub_url(a , a , repo_type='''dataset''' ) ) , '''r''' ) )
a = {int(a ): v for k, v in idalabel.items()}
a = idalabel
a = {v: k for k, v in idalabel.items()}
    a = CvtConfig(num_labels=a , id2label=a , label2id=a )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
a = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
a = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
a = [2, 2, 20]
a = [3, 12, 16]
a = [192, 768, 1_024]
a = CvtForImageClassification(a )
a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
a = image_size
a = torch.load(a , map_location=torch.device('''cpu''' ) )
a = OrderedDict()
a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
a = list_of_state_dict + cls_token(a )
a = list_of_state_dict + embeddings(a )
for cnt in range(config.depth[idx] ):
a = list_of_state_dict + attention(a , a )
a = list_of_state_dict + final()
for gg in list_of_state_dict:
print(a )
for i in range(len(a ) ):
a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(a )
model.save_pretrained(a )
image_processor.save_pretrained(a )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
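# Illustrative invocation (the script filename and paths are placeholders):
#   python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-hf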
| 0 | 0 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def a_ ( lowerCamelCase : Any ):
return EnvironmentCommand()
def a_ ( lowerCamelCase : Optional[Any] ):
return EnvironmentCommand(args.accelerate_config_file )
class UpperCAmelCase_ ( __lowercase ):
@staticmethod
def __UpperCAmelCase ( UpperCAmelCase__ : ArgumentParser ) -> str:
lowerCAmelCase = parser.add_parser('env' )
download_parser.set_defaults(func=__UpperCAmelCase )
download_parser.add_argument(
'--accelerate-config_file' , default=__UpperCAmelCase , help='The accelerate config file to use for the default values in the launching script.' , )
download_parser.set_defaults(func=__UpperCAmelCase )
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[Any] ) -> None:
lowerCAmelCase = accelerate_config_file
def __UpperCAmelCase ( self : int ) -> Tuple:
lowerCAmelCase = 'not installed'
if is_safetensors_available():
import safetensors
lowerCAmelCase = safetensors.__version__
elif importlib.util.find_spec('safetensors' ) is not None:
import safetensors
lowerCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
lowerCAmelCase = 'not installed'
lowerCAmelCase = lowerCAmelCase = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
lowerCAmelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(__UpperCAmelCase ):
lowerCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
lowerCAmelCase = (
'\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(__UpperCAmelCase , __UpperCAmelCase )
else F'''\t{accelerate_config}'''
)
lowerCAmelCase = 'not installed'
lowerCAmelCase = 'NA'
if is_torch_available():
import torch
lowerCAmelCase = torch.__version__
lowerCAmelCase = torch.cuda.is_available()
lowerCAmelCase = 'not installed'
lowerCAmelCase = 'NA'
if is_tf_available():
import tensorflow as tf
lowerCAmelCase = tf.__version__
try:
# deprecated in v2.1
lowerCAmelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
lowerCAmelCase = bool(tf.config.list_physical_devices('GPU' ) )
lowerCAmelCase = 'not installed'
lowerCAmelCase = 'not installed'
lowerCAmelCase = 'not installed'
lowerCAmelCase = 'NA'
if is_flax_available():
import flax
import jax
import jaxlib
lowerCAmelCase = flax.__version__
lowerCAmelCase = jax.__version__
lowerCAmelCase = jaxlib.__version__
lowerCAmelCase = jax.lib.xla_bridge.get_backend().platform
lowerCAmelCase = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': F'''{safetensors_version}''',
'Accelerate version': F'''{accelerate_version}''',
'Accelerate config': F'''{accelerate_config_str}''',
'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
'Tensorflow version (GPU?)': F'''{tf_version} ({tf_cuda_available})''',
'Flax version (CPU?/GPU?/TPU?)': F'''{flax_version} ({jax_backend})''',
'Jax version': F'''{jax_version}''',
'JaxLib version': F'''{jaxlib_version}''',
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(__UpperCAmelCase ) )
return info
@staticmethod
def __UpperCAmelCase ( UpperCAmelCase__ : List[Any] ) -> Any:
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 4 |
from __future__ import annotations
UpperCAmelCase__ = list[list[int]]
# assigning initial values to the grid
UpperCAmelCase__ = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
UpperCAmelCase__ = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _a ( a :Matrix , a :int , a :int , a :int ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _a ( a :Matrix ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _a ( a :Matrix ) -> Matrix | None:
if location := find_empty_location(a ):
a , a = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(a , a , a , a ):
a = digit
if sudoku(a ) is not None:
return grid
a = 0
return None
def _a ( a :Matrix ) -> None:
for row in grid:
for cell in row:
print(a , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
UpperCAmelCase__ = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 0 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase = 1000 ) -> int:
    # Iterate the continued-fraction expansion of sqrt(2), counting the
    # expansions whose numerator has more digits than its denominator.
    prev_numerator , prev_denominator = 1, 1
    result : list = []
    for i in range(1 , UpperCamelCase + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(F'{solution() = }')
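# Illustrative expected result: with the default limit of 1000 expansions this
# is Project Euler problem 57, whose published answer is 153 fractions with a
# longer numerator than denominator.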
| 41 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = KandinskyVaaPriorPipeline
__snake_case = ['''prompt''']
__snake_case = ['''prompt''', '''negative_prompt''']
__snake_case = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Dict ) ->Any:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : int ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
return 100
@property
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 12,
'''embedding_dim''': self.text_embedder_hidden_size,
'''num_layers''': 1,
}
a = PriorTransformer(**__UpperCAmelCase )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def __lowerCAmelCase ( self : Optional[int] ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
a = CLIPVisionModelWithProjection(__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : Tuple ) ->int:
"""simple docstring"""
a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__UpperCAmelCase , do_normalize=__UpperCAmelCase , do_resize=__UpperCAmelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = self.dummy_prior
a = self.dummy_image_encoder
a = self.dummy_text_encoder
a = self.dummy_tokenizer
a = self.dummy_image_processor
a = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=__UpperCAmelCase , clip_sample_range=10.0 , )
a = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''scheduler''': scheduler,
'''image_processor''': image_processor,
}
return components
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str=0 ) ->int:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.image_embeds
a = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
a = image[0, -10:]
a = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
a = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowerCAmelCase ( self : List[Any] ) ->Optional[Any]:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
a = False
self._test_inference_batch_single_identical(
test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
@skip_mps
def __lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
a = torch_device == '''cpu'''
a = False
self._test_attention_slicing_forward_pass(
test_max_difference=__UpperCAmelCase , test_mean_pixel_difference=__UpperCAmelCase , )
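# Illustrative: these unit tests are normally collected by pytest, e.g.
#   pytest -k "KandinskyVaaPriorPipeline" path/to/this/test_file.py
# (the exact path depends on where the pipeline test suite lives).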
| 0 | 0 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _snake_case ( lowercase__ : int , lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : Tuple , lowercase__ : Dict ) -> List[str]:
'''simple docstring'''
with open(lowercase__ ) as metadata_file:
lowerCAmelCase_ :Any = json.load(lowercase__ )
lowerCAmelCase_ :List[str] = LukeConfig(use_entity_aware_attention=lowercase__ , **metadata["""model_config"""] )
# Load in the weights from the checkpoint_path
lowerCAmelCase_ :List[str] = torch.load(lowercase__ , map_location="""cpu""" )
# Load the entity vocab file
lowerCAmelCase_ :Tuple = load_entity_vocab(lowercase__ )
lowerCAmelCase_ :str = RobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCAmelCase_ :Optional[int] = AddedToken("""<ent>""" , lstrip=lowercase__ , rstrip=lowercase__ )
lowerCAmelCase_ :Optional[Any] = AddedToken("""<ent2>""" , lstrip=lowercase__ , rstrip=lowercase__ )
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase__ )
with open(os.path.join(lowercase__ , LukeTokenizer.vocab_files_names["""entity_vocab_file"""] ) , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
lowerCAmelCase_ :Any = LukeTokenizer.from_pretrained(lowercase__ )
# Initialize the embeddings of the special tokens
lowerCAmelCase_ :Dict = state_dict["""embeddings.word_embeddings.weight"""]
lowerCAmelCase_ :Tuple = word_emb[tokenizer.convert_tokens_to_ids(["""@"""] )[0]].unsqueeze(0 )
lowerCAmelCase_ :Optional[Any] = word_emb[tokenizer.convert_tokens_to_ids(["""#"""] )[0]].unsqueeze(0 )
lowerCAmelCase_ :Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCAmelCase_ :Tuple = f"""encoder.layer.{layer_index}.attention.self."""
lowerCAmelCase_ :Tuple = state_dict[prefix + matrix_name]
lowerCAmelCase_ :Dict = state_dict[prefix + matrix_name]
lowerCAmelCase_ :Tuple = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCAmelCase_ :Union[str, Any] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
lowerCAmelCase_ :Optional[Any] = entity_emb[entity_vocab["""[MASK]"""]]
lowerCAmelCase_ :Union[str, Any] = LukeModel(config=lowercase__ ).eval()
lowerCAmelCase_ , lowerCAmelCase_ :int = model.load_state_dict(lowercase__ , strict=lowercase__ )
if not (len(lowercase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"):
raise ValueError(f"""Missing keys {", ".join(lowercase__ )}. Expected only missing embeddings.position_ids""" )
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
f""" {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}""" )
# Check outputs
lowerCAmelCase_ :int = LukeTokenizer.from_pretrained(lowercase__ , task="""entity_classification""" )
lowerCAmelCase_ :Optional[int] = (
"""Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
""" new world number one avoid a humiliating second- round exit at Wimbledon ."""
)
lowerCAmelCase_ :Tuple = (3_9, 4_2)
lowerCAmelCase_ :Optional[Any] = tokenizer(lowercase__ , entity_spans=[span] , add_prefix_space=lowercase__ , return_tensors="""pt""" )
lowerCAmelCase_ :Optional[int] = model(**lowercase__ )
# Verify word hidden states
if model_size == "large":
lowerCAmelCase_ :Dict = torch.Size((1, 4_2, 1_0_2_4) )
lowerCAmelCase_ :Optional[int] = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
else: # base
lowerCAmelCase_ :List[Any] = torch.Size((1, 4_2, 7_6_8) )
lowerCAmelCase_ :Optional[Any] = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
lowerCAmelCase_ :List[Any] = torch.Size((1, 1, 1_0_2_4) )
lowerCAmelCase_ :List[str] = torch.tensor([[0.0466, -0.0106, -0.0179]] )
else: # base
lowerCAmelCase_ :str = torch.Size((1, 1, 7_6_8) )
lowerCAmelCase_ :Optional[int] = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase__ , atol=1E-4 ):
raise ValueError
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowercase__ ) )
model.save_pretrained(lowercase__ )
def _snake_case ( lowercase__ : str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = {}
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
for index, line in enumerate(lowercase__ ):
lowerCAmelCase_ , lowerCAmelCase_ :Any = line.rstrip().split("""\t""" )
lowerCAmelCase_ :Optional[int] = index
return entity_vocab
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__UpperCAmelCase = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
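# Illustrative invocation (the script filename and paths are placeholders):
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base-hf \
#       --model_size base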
| 84 |
from math import factorial
UpperCAmelCase__ = {str(digit): factorial(digit) for digit in range(10)}
def _a ( a :int ) -> int:
if not isinstance(a , a ):
raise TypeError('''Parameter number must be int''' )
if number < 0:
raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Convert the number to a string to iterate over its digits and sum each digit's factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(a ) )
def _a ( a :int = 60 , a :int = 1_000_000 ) -> int:
if not isinstance(a , a ) or not isinstance(a , a ):
raise TypeError('''Parameters chain_length and number_limit must be int''' )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
'''Parameters chain_length and number_limit must be greater than 0''' )
# the counter for the chains with the exact desired length
a = 0
# the cached sizes of the previous chains
a = {}
for start_chain_element in range(1 , a ):
# The temporary set will contain the elements of the chain
a = set()
a = 0
# Stop computing the chain when you find a cached size, a repeating item or the
    # length is greater than the desired one.
a = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(a )
chain_set_length += 1
a = digit_factorial_sum(a )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
a = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
| 0 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A =logging.get_logger(__name__)
__A ={
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class _snake_case ( a__ ):
lowerCAmelCase :Any = '''gpt_neo'''
lowerCAmelCase :Optional[int] = ['''past_key_values''']
lowerCAmelCase :List[str] = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , _lowerCamelCase=5_0257 , _lowerCamelCase=2048 , _lowerCamelCase=2048 , _lowerCamelCase=24 , _lowerCamelCase=[[["global", "local"], 12]] , _lowerCamelCase=16 , _lowerCamelCase=None , _lowerCamelCase=256 , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.1 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.02 , _lowerCamelCase=True , _lowerCamelCase=5_0256 , _lowerCamelCase=5_0256 , **_lowerCamelCase , ):
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : str = hidden_size
UpperCAmelCase__ : Tuple = num_layers
UpperCAmelCase__ : str = num_heads
UpperCAmelCase__ : Optional[Any] = intermediate_size
UpperCAmelCase__ : Dict = window_size
UpperCAmelCase__ : Any = activation_function
UpperCAmelCase__ : int = resid_dropout
UpperCAmelCase__ : List[str] = embed_dropout
UpperCAmelCase__ : int = attention_dropout
UpperCAmelCase__ : List[Any] = classifier_dropout
UpperCAmelCase__ : int = layer_norm_epsilon
UpperCAmelCase__ : str = initializer_range
UpperCAmelCase__ : List[str] = use_cache
UpperCAmelCase__ : str = bos_token_id
UpperCAmelCase__ : List[Any] = eos_token_id
UpperCAmelCase__ : Tuple = attention_types
UpperCAmelCase__ : List[str] = self.expand_attention_types_params(__UpperCAmelCase)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, '''
f'''`config.num_layers = {self.num_layers}`. '''
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""")
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase)
@staticmethod
def snake_case__ ( _lowerCamelCase):
UpperCAmelCase__ : List[str] = []
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
import torch
UpperCAmelCase__ : Any = input.size()
UpperCAmelCase__ : Tuple = len(UpperCamelCase__ )
UpperCAmelCase__ : Optional[int] = shape[dimension]
UpperCAmelCase__ : str = torch.arange(0 , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ : str = torch.div(sizedim - size , UpperCamelCase__ , rounding_mode="""floor""" ) + 1
UpperCAmelCase__ : Optional[Any] = torch.arange(UpperCamelCase__ ) + low_indices[:min_length][:, None]
UpperCAmelCase__ : Dict = [slice(UpperCamelCase__ )] * rank
UpperCAmelCase__ : int = indices
UpperCAmelCase__ : Any = input[s]
UpperCAmelCase__ : int = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(UpperCamelCase__ )
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
import torch
UpperCAmelCase__ : Optional[Any] = torch.arange(1 , UpperCamelCase__ )
UpperCAmelCase__ : str = torch.remainder(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase__ : Optional[int] = remainders == 0
UpperCAmelCase__ : List[str] = candidates[divisor_indices]
UpperCAmelCase__ : List[Any] = torch.max(UpperCamelCase__ )
return largest_divisor, torch.div(UpperCamelCase__ , UpperCamelCase__ , rounding_mode="""floor""" )
class _snake_case ( a__ ):
@property
def snake_case__ ( self):
UpperCAmelCase__ : Dict = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}})
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="""inputs""")
UpperCAmelCase__ : Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
UpperCAmelCase__ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def snake_case__ ( self):
return self._config.num_heads
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ):
UpperCAmelCase__ : Any = super(__UpperCAmelCase , self).generate_dummy_inputs(
__UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase)
        # We need to order the inputs in the way they appear in the forward()
UpperCAmelCase__ : Optional[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""")
else:
import torch
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCAmelCase__ : Any = seqlen + 2
UpperCAmelCase__ : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
UpperCAmelCase__ : List[Any] = [
(torch.zeros(__UpperCAmelCase), torch.zeros(__UpperCAmelCase)) for _ in range(self.num_layers)
]
UpperCAmelCase__ : Optional[int] = common_inputs["""attention_mask"""]
if self.use_past:
UpperCAmelCase__ : Any = ordered_inputs["""attention_mask"""].dtype
UpperCAmelCase__ : str = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase)] , dim=1)
return ordered_inputs
@property
def snake_case__ ( self):
        return 13
 | 163 |
def _a ( a :int = 100 ) -> int:
a = n * (n + 1) * (2 * n + 1) / 6
a = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def A__ ( self ) -> List[Any]:
'''simple docstring'''
_lowercase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase , 'num_attention_heads' ) )
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=640 , lowerCAmelCase=4 , lowerCAmelCase="silu" , lowerCAmelCase=3 , lowerCAmelCase=32 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=0.02 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=10 , lowerCAmelCase=None , ) -> Any:
'''simple docstring'''
_lowercase =parent
_lowercase =batch_size
_lowercase =image_size
_lowercase =patch_size
_lowercase =num_channels
_lowercase =last_hidden_size
_lowercase =num_attention_heads
_lowercase =hidden_act
_lowercase =conv_kernel_size
_lowercase =output_stride
_lowercase =hidden_dropout_prob
_lowercase =attention_probs_dropout_prob
_lowercase =classifier_dropout_prob
_lowercase =use_labels
_lowercase =is_training
_lowercase =num_labels
_lowercase =initializer_range
_lowercase =scope
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
_lowercase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase =None
_lowercase =None
if self.use_labels:
_lowercase =ids_tensor([self.batch_size] , self.num_labels )
_lowercase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowercase =self.get_config()
return config, pixel_values, labels, pixel_labels
def A__ ( self ) -> int:
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Any:
'''simple docstring'''
_lowercase =MobileViTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowercase =model(__UpperCAmelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
_lowercase =self.num_labels
_lowercase =MobileViTForImageClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowercase =model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowercase =self.num_labels
_lowercase =MobileViTForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
_lowercase =model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowercase =model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase =config_and_inputs
_lowercase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_a = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
_a = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =MobileViTModelTester(self )
_lowercase =MobileViTConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def A__ ( self ) -> List[str]:
'''simple docstring'''
pass
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =model_class(__UpperCAmelCase )
_lowercase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase =[*signature.parameters.keys()]
_lowercase =['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def A__ ( self ) -> Dict:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
_lowercase =model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
_lowercase =model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
_lowercase =outputs.hidden_states
_lowercase =5
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowercase =2
for i in range(len(__UpperCAmelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_lowercase , _lowercase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase =True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
_lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase =MobileViTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def a ( ) -> Union[str, Any]:
"""simple docstring"""
_lowercase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def A__ ( self ) -> Any:
'''simple docstring'''
_lowercase =MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(__UpperCAmelCase )
_lowercase =self.default_image_processor
_lowercase =prepare_img()
_lowercase =image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**__UpperCAmelCase )
# verify the logits
_lowercase =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
_lowercase =torch.tensor([-1.9364, -1.2327, -0.4653] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowercase =model.to(__UpperCAmelCase )
_lowercase =MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowercase =prepare_img()
_lowercase =image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**__UpperCAmelCase )
_lowercase =outputs.logits
# verify the logits
_lowercase =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , __UpperCAmelCase )
_lowercase =torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=__UpperCAmelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowercase =model.to(__UpperCAmelCase )
_lowercase =MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowercase =prepare_img()
_lowercase =image_processor(images=__UpperCAmelCase , return_tensors='pt' ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
_lowercase =model(**__UpperCAmelCase )
_lowercase =outputs.logits.detach().cpu()
_lowercase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase , target_sizes=[(50, 60)] )
_lowercase =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
_lowercase =image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
_lowercase =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , __UpperCAmelCase )
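# Illustrative: run with pytest, e.g.
#   pytest -k "MobileViT" path/to/this/test_file.py
# The @slow integration tests additionally need network access to fetch the
# `apple/mobilevit-xx-small` and `apple/deeplabv3-mobilevit-xx-small` checkpoints.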
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
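# Illustrative effect of the lazy-module pattern above:
#   import transformers.models.groupvit as gv   # cheap: heavy backends not imported yet
#   cfg = gv.GroupViTConfig()                    # first attribute access loads
#                                                # configuration_groupvit on demand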
| 0 | 0 |
from typing import List, Optional, Union

import numpy as np
import tensorflow as tf

from .utils import logging


logger = logging.get_logger(__name__)


def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    """Deal with dynamic shapes cleanly: return static dims where known, dynamic ones otherwise."""
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)

    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    # Adding a tiny constant is a known workaround for an XLA/CPU issue with
    # tf.nn.softmax; it leaves the result numerically unchanged in practice.
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)


def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    # A TF equivalent of torch.nn.functional.layer_norm over a single axis.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")

    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)

    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)

    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs,
        mean,
        variance,
        offset=bias,
        scale=weight,
        variance_epsilon=epsilon,
    )
    return outputs


def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF

    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank

    if start_dim == end_dim:
        return input

    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)


def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    """Turn a 2D or 3D attention mask into the additive 4D mask the attention layers expect."""
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min

    return encoder_extended_attention_mask


def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ),
    )


def save_attributes_to_hdf5_group(group, name, data):
    """Save attributes (data) of the specified name into the HDF5 group, chunking if needed."""
    # HDF5 cannot store attributes larger than the object header limit, so large
    # attribute lists are split into numbered chunks ("name0", "name1", ...).
    HDF5_OBJECT_HEADER_LIMIT = 64512

    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]

    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}"
        )

    data_npy = np.asarray(data)

    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)

    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)

    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data


def load_attributes_from_hdf5_group(group, name):
    """Load attributes of the specified name from the HDF5 group, reassembling chunks if needed."""
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]]
            )
            chunk_id += 1
    return data


def expand_1d(data):
    """Expand 1-dimensional tensors into 2-dimensional `(batch, 1)` tensors, as Keras does internally."""

    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t

    return tf.nest.map_structure(_expand_single_1d_tensor, data)
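# Quick usage sketch (illustrative; run where this module is importable, since it
# relies on relative imports):
#
#     x = tf.reshape(tf.range(24, dtype=tf.float32), (2, 3, 4))
#     shape_list(x)                         # -> [2, 3, 4]
#     shape_list(flatten(x, start_dim=1))   # -> [2, 12], mirroring torch.flatten
#     expand_1d(tf.constant([1.0, 2.0])).shape  # -> (2, 1)
#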
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """
    Helps in renaming the embedding layer weights.

    Args:
        idx: stage number in the original model
    """
    embed = []
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
f"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
f"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
f"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
f"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention(idx, cnt):
    """
    Helps in renaming the attention block layer weights.

    Args:
        idx: stage number in the original model
        cnt: block number within the stage
    """
    attention_weights = []
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
f"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
f"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
f"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
f"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
f"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", f"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", f"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", f"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", f"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", f"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(f"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", f"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Helps in renaming the cls_token weights."""
    token = []
token.append((f"cvt.encoder.stages.{idx}.cls_token", """stage2.cls_token""") )
return token
def final():
    """Helps in renaming the final layernorm and classification head."""
    head = []
head.append(("""layernorm.weight""", """norm.weight""") )
head.append(("""layernorm.bias""", """norm.bias""") )
head.append(("""classifier.weight""", """head.weight""") )
head.append(("""classifier.bias""", """head.bias""") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Build a CvT config, rename the original weights, and save the converted model."""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__snake_case : Any = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
default=384,
type=int,
help='Input Image Size',
)
parser.add_argument(
'--cvt_file_name',
default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
type=str,
help='Input Image Size',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__snake_case : Union[str, Any] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    """Banker's algorithm for deadlock avoidance."""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        # Total of each resource currently allocated across all processes
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        # Resources still available: claim vector minus what is allocated
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        # Remaining need of each process: maximum claim minus current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        # Map each process's original index to its need vector
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        # Simulate the Banker's algorithm until all processes run or an unsafe state is hit
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
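
# Demonstration (illustrative): exercise the algorithm on the sample tables defined
# at the top of this file. `describe=True` also pretty-prints the resource tables
# before the simulation starts.
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)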
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    # Pad (or truncate) each ragged sequence to `sequence_length`. A tuple
    # padding value signals that each element is itself a pair (e.g. a span).
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        length = len(tensor[:sequence_length])
        if padding_side == "right":
            out_tensor[i, :length] = tensor[:sequence_length]
        else:
            out_tensor[i, sequence_length - length :] = tensor[:sequence_length]

    return out_tensor.tolist()
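
# Example (illustrative): right-padding ragged label sequences with -1 to length 4:
#
#     padding_tensor([[1, 2], [3]], -1, "right", 4)
#     # -> [[1, 2, -1, -1], [3, -1, -1, -1]]
#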
def is_punctuation(char):
    cp = ord(char)
    # Treat all non-letter/number ASCII symbols as punctuation, even those (like
    # "^" or "`") that are not in the Unicode "P" categories.
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    """
    Data collator that dynamically pads the inputs received, as well as the labels,
    NER tags, and original entity spans produced by LUKE preprocessing.
    """

    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
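
# Illustrative usage (sketch; assumes a LUKE tokenizer and pre-tokenized features
# carrying "labels", "ner_tags" and "original_entity_spans" keys):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("studio-ousia/luke-base")
#     collator = DataCollatorForLukeTokenClassification(tokenizer, padding="longest")
#     batch = collator(features)  # features: list of dicts produced by preprocessing
#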
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ELECTRA tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        # Re-create the backend normalizer if its options differ from what was requested
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
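
# Illustrative usage (sketch; assumes network access to the public checkpoint):
#
#     from transformers import ElectraTokenizerFast
#
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     tokenizer("ELECTRA tokenizers are WordPiece-based.")["input_ids"]
#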
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"camembert-base": 512,
}
SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" CamemBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
def perfect_cube(n: int) -> bool:
    """
    Check if a number is a perfect cube or not.

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    """
    # Round the real cube root before cubing so floating-point error
    # (e.g. 64 ** (1 / 3) == 3.9999999999999996) does not break the check.
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample, when applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
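
# Behavior sketch (illustrative): with drop_prob=0.5 in training mode, roughly half
# of the samples in a batch have their residual branch zeroed, and the survivors
# are scaled by 1 / keep_prob so the expected value is unchanged:
#
#     x = torch.ones(4, 3)
#     drop_path(x, drop_prob=0.5, training=True)  # rows are all-0.0 or all-2.0
#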
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, when applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings with a strided convolution."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor of shape [B, C, *]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the module acts as a residual token mixer
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
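
# Smoke test sketch (illustrative; the small config values below are assumptions):
#
#     from transformers import PoolFormerConfig, PoolFormerModel
#     import torch
#
#     config = PoolFormerConfig(hidden_sizes=[8, 16, 24, 32], depths=[1, 1, 1, 1])
#     model = PoolFormerModel(config)
#     out = model(torch.randn(1, 3, 64, 64))
#     print(out.last_hidden_state.shape)  # stage strides 4,2,2,2 -> (1, 32, 2, 2)
#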
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
import faiss
a = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
a = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__UpperCAmelCase ):
a = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = faiss.IndexFlat(5 )
a = FaissIndex(custom_index=__UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
a = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _a ( a :Dict ) -> Any:
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
a = '''index.faiss'''
a = F"""mock://{index_name}"""
index.save(a , storage_options=mockfs.storage_options )
a = FaissIndex.load(a , storage_options=mockfs.storage_options )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(a )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = Elasticsearch()
a = {'''acknowledged''': True}
a = ElasticSearchIndex(es_client=__UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
# batched queries with timeout
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase , request_timeout=30 )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
| 0 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config', description=_description)
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command', description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments', 'Arguments that can be configured through `accelerate config`.'
    )
    config_args.add_argument(
        '--config_file',
        type=str,
        default=None,
        help='Path to the config file to use for accelerate.',
    )
    config_args.add_argument(
        '--tpu_name',
        default=None,
        help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.',
    )
    config_args.add_argument(
        '--tpu_zone',
        default=None,
        help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.',
    )
    pod_args = parser.add_argument_group('TPU Arguments', 'Arguments for options ran inside the TPU.')
    pod_args.add_argument(
        '--use_alpha',
        action='store_true',
        help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.',
    )
    pod_args.add_argument(
        '--command_file',
        default=None,
        help='The path to the file containing the commands to run on the pod on startup.',
    )
    pod_args.add_argument(
        '--command',
        action='append',
        nargs='+',
        help='A command to run on the pod. Can be passed multiple times.',
    )
    pod_args.add_argument(
        '--install_accelerate',
        action='store_true',
        help='Whether to install accelerate on the pod. Defaults to False.',
    )
    pod_args.add_argument(
        '--accelerate_version',
        default='latest',
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        '--debug', action='store_true', help='If set, will print the command that would be run instead of running it.'
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == 'dev':
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == 'latest':
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f'accelerate=={args.accelerate_version}'

    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.')

    if args.command_file:
        with open(args.command_file, 'r') as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '; '.join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ['alpha']
    cmd += [
        'compute',
        'tpus',
        'tpu-vm',
        'ssh',
        args.tpu_name,
        '--zone',
        args.tpu_zone,
        '--command',
        args.command,
        '--worker',
        'all',
    ]
    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print('Successfully setup pod.')


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
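
# Illustration (not part of accelerate): the launcher above reduces to string
# assembly -- startup lines are joined with "; " and passed as one --command
# string to `gcloud ... tpu-vm ssh`. With made-up TPU name/zone, a --debug run
# would print something like:
#
#   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b \
#       --command "cd /usr/share; pip install accelerate -U; accelerate launch train.py" \
#       --worker all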
| 32 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32_128,
        d_model=512,
        d_kv=64,
        d_ff=2_048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
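
# Quick check of the activation parsing above (run from a Python session with
# transformers installed; this file ships as models/t5/configuration_t5.py):
#
#   from transformers import T5Config
#   config = T5Config(feed_forward_proj="gated-gelu")
#   print(config.dense_act_fn, config.is_gated_act)  # -> gelu_new True
#   print(T5Config().dense_act_fn)                   # -> relu (the default)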
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
__snake_case =TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
__snake_case =TaTokenizerFast
__snake_case ={"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
__snake_case =_LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
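
# The pattern above defers heavy imports: symbols listed in _import_structure are
# only imported on first attribute access. A rough, simplified stand-in (the real
# _LazyModule also handles __dir__, module caching, and extra_objects) looks like:
#
#   import importlib
#
#   class LazyModule:
#       def __init__(self, name, import_structure):
#           self._name = name
#           self._import_structure = import_structure  # {"submodule": ["Symbol", ...]}
#
#       def __getattr__(self, attr):
#           for submodule, symbols in self._import_structure.items():
#               if attr in symbols:
#                   module = importlib.import_module(f".{submodule}", self._name)
#                   return getattr(module, attr)
#           raise AttributeError(attr)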
| 4 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
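
# Example of the validation above (assumes this class is the installed
# transformers LlamaConfig):
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 0.5})
#   # -> ValueError: `rope_scaling`'s factor field must be a float > 1, got 0.5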
| 0 | 0 |
'''simple docstring'''
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)

    # save most recent tweets
    alltweets.extend(new_tweets)

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
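
# Hedged alternative: tweepy also ships a Cursor helper that does the max_id
# bookkeeping above for you (sketch only; needs valid credentials and a live API):
#
#   for tweet in tweepy.Cursor(api.user_timeline, screen_name="FirePing32", count=200).items():
#       print(tweet.id_str, tweet.text)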
| 41 |
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "contact@muhammadumerfarooq.me"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get sub domain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
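
# Offline sanity check of the e-mail regex used above (no network needed):
if __name__ == "__main__":
    sample = "contact us at admin@github.com or sales@github.com"
    print(re.findall("[a-zA-Z0-9]+@" + "github.com", sample))
    # -> ['admin@github.com', 'sales@github.com']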
| 0 | 0 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCAmelCase = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
def __lowerCAmelCase ( self ) -> Optional[Any]:
import faiss
lowerCAmelCase_ :Any = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
import faiss
lowerCAmelCase_ :List[str] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :List[str] = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(__UpperCAmelCase , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self ) -> List[str]:
from elasticsearch import Elasticsearch
lowerCAmelCase_ :Optional[Any] = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
lowerCAmelCase_ :List[Any] = {"""acknowledged""": True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCAmelCase_ :Tuple = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 29}]}}
lowerCAmelCase_ :Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=__UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class FaissIndexTest(TestCase):
def __lowerCAmelCase ( self ) -> Any:
import faiss
lowerCAmelCase_ :Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCAmelCase_ :Union[str, Any] = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase_ :Union[str, Any] = 1
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = index.search(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCAmelCase_ :Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = index.search_batch(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search_batch , queries[0] )
lowerCAmelCase_ :Optional[Any] = [scores[0] for scores in total_scores]
lowerCAmelCase_ :str = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __UpperCAmelCase )
def __lowerCAmelCase ( self ) -> List[Any]:
import faiss
lowerCAmelCase_ :Optional[Any] = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCAmelCase_ :Tuple = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase_ :List[str] = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
import faiss
lowerCAmelCase_ :Optional[int] = faiss.IndexFlat(5 )
lowerCAmelCase_ :List[Any] = FaissIndex(custom_index=__UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self ) -> Dict:
import faiss
lowerCAmelCase_ :Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
lowerCAmelCase_ :List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCAmelCase_ :Any = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase_ :Union[str, Any] = 1
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = index.search(__UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _snake_case ( lowercase__ : Dict ) -> Any:
'''simple docstring'''
import faiss
lowerCAmelCase_ :str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCAmelCase_ :Union[str, Any] = """index.faiss"""
lowerCAmelCase_ :int = f"""mock://{index_name}"""
index.save(lowercase__ , storage_options=mockfs.storage_options )
lowerCAmelCase_ :Any = FaissIndex.load(lowercase__ , storage_options=mockfs.storage_options )
lowerCAmelCase_ :Tuple = np.zeros(5 , dtype=np.floataa )
lowerCAmelCase_ :Union[str, Any] = 1
lowerCAmelCase_ , lowerCAmelCase_ :int = index.search(lowercase__ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def __lowerCAmelCase ( self ) -> List[Any]:
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
lowerCAmelCase_ :List[str] = Elasticsearch()
lowerCAmelCase_ :Optional[Any] = {"""acknowledged""": True}
lowerCAmelCase_ :str = ElasticSearchIndex(es_client=__UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["""foo""", """bar""", """foobar"""] )
# single query
lowerCAmelCase_ :Any = """foo"""
lowerCAmelCase_ :Any = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
lowerCAmelCase_ , lowerCAmelCase_ :int = index.search(__UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCAmelCase_ :int = """foo"""
lowerCAmelCase_ :Any = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 0}]}}
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = index.search(__UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCAmelCase_ :Optional[int] = ["""foo""", """bar""", """foobar"""]
lowerCAmelCase_ :int = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = index.search_batch(__UpperCAmelCase )
lowerCAmelCase_ :Optional[Any] = [scores[0] for scores in total_scores]
lowerCAmelCase_ :int = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
# batched queries with timeout
lowerCAmelCase_ :int = ["""foo""", """bar""", """foobar"""]
lowerCAmelCase_ :Any = {"""hits""": {"""hits""": [{"""_score""": 1, """_id""": 1}]}}
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = index.search_batch(__UpperCAmelCase , request_timeout=30 )
lowerCAmelCase_ :Union[str, Any] = [scores[0] for scores in total_scores]
lowerCAmelCase_ :Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
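
# The tests above go through datasets' thin wrapper; the raw faiss calls look
# like this (assumes faiss-cpu and numpy are installed; runs only as a script):
if __name__ == "__main__":
    import faiss

    index = faiss.IndexFlatIP(5)  # inner-product metric, dimension 5
    index.add(np.eye(5, dtype=np.float32))  # one unit vector per row
    query = np.zeros((1, 5), dtype=np.float32)
    query[0, 1] = 1
    scores, ids = index.search(query, 1)  # top-1 neighbour
    print(scores[0, 0], ids[0, 0])  # -> 1.0 1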
| 84 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase_ ( lowercase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls : str ) ->Tuple:
"""simple docstring"""
a = tempfile.mkdtemp()
a = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
a = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __lowerCAmelCase ( cls : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = 7 if get_gpu_count() > 1 else 2
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''translation_no_trainer''' ) ) )
@slow
def __lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
a = logging.StreamHandler(sys.stdout )
logger.addHandler(__UpperCAmelCase )
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = self.get_auto_remove_tmp_dir()
a = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
a = get_results(__UpperCAmelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''image_classification_no_trainer''' ) ) )
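
# One-off illustration of the fixture above (assumes accelerate is installed):
# write a minimal accelerate config, then prefix example scripts with
# `accelerate launch --config_file <path>`.
if __name__ == "__main__":
    import tempfile

    from accelerate.utils import write_basic_config

    config_path = os.path.join(tempfile.mkdtemp(), "default_config.yml")
    write_basic_config(save_location=config_path)
    print(["accelerate", "launch", "--config_file", config_path, "run_glue_no_trainer.py"])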
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A ={
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 163 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
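
# Cross-check against a direct brute force for a tiny limit (Project Euler 234:
# n is "semidivisible" iff exactly one of lps(n), ups(n) divides it, where lps/ups
# are the largest prime <= sqrt(n) and the smallest prime >= sqrt(n)).
# Assumes sympy is available; silently skipped otherwise.
if __name__ == "__main__":
    try:
        from math import isqrt

        from sympy import isprime, nextprime, prevprime

        def brute_force(limit: int) -> int:
            total = 0
            for n in range(4, limit + 1):
                r = isqrt(n)
                lps = r if isprime(r) else prevprime(r)
                ups = r if (r * r == n and isprime(r)) else nextprime(r)
                if (n % lps == 0) + (n % ups == 0) == 1:
                    total += n
            return total

        assert brute_force(15) == solution(15) == 30  # 8 + 10 + 12
    except ImportError:
        pass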
if __name__ == "__main__":
print(solution())
| 0 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
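
# Example invocation via python-fire (file names are placeholders; extra flags
# are forwarded to calculate_rouge as keyword arguments):
#
#   python <this script> predictions.txt gold.txt --save_path metrics.json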
| 205 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 0 | 0 |
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
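
# Quick usage check: the cross product of AB and AC vanishes exactly when the
# three points are collinear.
if __name__ == "__main__":
    print(are_collinear((4.8, 3.0, 0.0), (2.4, 1.5, 0.0), (0.0, 0.0, 0.0)))  # True
    print(are_collinear((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))  # False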
| 348 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase_ ( lowercase ):
'''simple docstring'''
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
a = '''mock_framework'''
# Framework provided - return whatever the user provides
a = FeaturesManager.determine_framework(self.test_model , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(__UpperCAmelCase )
a = FeaturesManager.determine_framework(__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__UpperCAmelCase ):
a = FeaturesManager.determine_framework(__UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_tf )
# Both in environment -> use PyTorch
a = MagicMock(return_value=__UpperCAmelCase )
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(__UpperCAmelCase , self.framework_pt )
# Both not in environment -> raise error
a = MagicMock(return_value=__UpperCAmelCase )
a = MagicMock(return_value=__UpperCAmelCase )
with patch('''transformers.onnx.features.is_tf_available''' , __UpperCAmelCase ), patch(
'''transformers.onnx.features.is_torch_available''' , __UpperCAmelCase ):
with self.assertRaises(__UpperCAmelCase ):
a = FeaturesManager.determine_framework(self.test_model )
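
# Outside the mocks, the call under test is a one-liner (assumes transformers
# with the onnx extras installed):
if __name__ == "__main__":
    print(FeaturesManager.determine_framework(SMALL_MODEL_IDENTIFIER))  # "pt" or "tf"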
| 0 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'caidas/swin2sr-classicalsr-x2-64': (
        'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = 'swin2sr'
    attribute_map = {
        'hidden_size': 'embed_dim',
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=1_80,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection='1conv',
        upsampler='pixelshuffle',
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
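
# Example (assumes this module is importable as part of transformers):
#
#   config = Swin2SRConfig(upscale=4)
#   print(config.embed_dim, config.num_hidden_layers)  # -> 180 6
#   # num_hidden_layers resolves to num_layers (= len(depths)) via attribute_map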
| 134 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin


class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
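

# Usage sketch mirroring test_prepare_batch above (needs network access to
# fetch the checkpoint; nothing here goes beyond what the tests exercise):
#
#   tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
#   batch = tokenizer(["A long paragraph for summarization."], padding=True, return_tensors="pt")
#   print(batch.input_ids.shape)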
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
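

# Usage sketch mirroring the tests above (downloads the underlying QA model on
# first use):
#
#   tool = load_tool("text-question-answering")
#   tool.setup()
#   print(tool(TEXT, "Who founded Hugging Face?"))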
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
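

# Note on the lazy pattern above: importing the package is cheap, and the
# torch-backed classes are only materialized on first attribute access, e.g.
#
#   from transformers.models.nllb_moe import NllbMoeConfig  # typically no torch import yet
#   config = NllbMoeConfig()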
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 2048,
"AI-Sweden/gpt-sw3-350m": 2048,
"AI-Sweden/gpt-sw3-1.6b": 2048,
"AI-Sweden/gpt-sw3-6.7b": 2048,
"AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts. NOTE: the original
        # source lists a dozen distinct Unicode space/zero-width characters
        # here, which were flattened to plain spaces in this copy; the escapes
        # below are a hedged approximation covering the common cases.
        # fmt: off
        self.whitespaces = {"\u00a0", "\u2000", "\u2001", "\u2002", "\u2003", "\u2004", "\u2005", "\u2006", "\u2007", "\u2008", "\u200b", "\ufeff"}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer."""
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids to text using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
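

# Usage sketch (requires network access and the sentencepiece package; the
# sample sentence is arbitrary):
#
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer("Träd är fina", return_tensors="pt")["input_ids"]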
def gray_code_sequence(bit_count: int) -> list:
    """
    Return the Gray code sequence for ``bit_count`` bits as integers.

    >>> gray_code_sequence(1)
    [0, 1]
    >>> gray_code_sequence(2)
    [0, 1, 3, 2]
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
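

# Worked example: for bit_count = 2 the recursion starts from ["0", "1"],
# prefixes "0" over the first half and "1" over the reversed half, yielding
# ["00", "01", "11", "10"], i.e. gray_code_sequence(2) == [0, 1, 3, 2];
# successive values differ in exactly one bit.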
if __name__ == "__main__":
import doctest
doctest.testmod()
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            # NOTE: the original flag values were lost in this copy; False/False
            # is consistent with the capitalised "iPhone" in the expected output.
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )
        # fmt: on

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        # fmt: off
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )
        # fmt: on

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
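

# Usage sketch of the processor outside the tests (a tokenizer and image
# processor built from the same local fixtures the tests create):
#
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   inputs = processor(text="lower newer", images=image, return_tensors="np")
#   # -> dict with input_ids, token_type_ids, attention_mask, pixel_values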
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
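

# Minimal usage sketch: the defaults above correspond to the base checkpoint
# listed in the archive map, so constructing the config takes no arguments.
if __name__ == "__main__":
    config = MarkupLMConfig()
    print(config.xpath_unit_hidden_size)  # -> 32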
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx: int) -> list:
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed


def attention(idx: int, cnt: int) -> list:
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights


def cls_token(idx: int) -> list:
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token


def final() -> list:
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
UpperCAmelCase__ = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
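

# Example invocation (the script filename is illustrative; the flags are the
# ones defined in the argparse block above):
#
#   python convert_cvt_checkpoint.py \
#       --cvt_model cvt-w24 \
#       --image_size 384 \
#       --cvt_file_name "cvtmodels/CvT-w24-384x384-IN-22k.pth" \
#       --pytorch_dump_folder_path ./cvt-w24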
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args , take_from: Optional[Union[Dict, Any]] = None , standard_warn: bool = True , stacklevel: int = 2 ):
    """Warn about each (attribute, version, message) deprecation tuple in `args` and
    return the deprecated value(s), popping them from `take_from` when it is a kwargs dict."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        # Refuse tuples whose removal version has already shipped.
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'''The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''''
                f''' version {__version__} is >= {version_name}''' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'''The `{attribute}` argument is deprecated and will be removed in version {version_name}.'''
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'''The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'''
        elif deprecated_kwargs is None:
            warning = f'''`{attribute}` is deprecated and will be removed in version {version_name}.'''
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    # Any leftover keys in a kwargs dict were never valid deprecations.
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'''{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`''' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
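# Minimal usage sketch (hypothetical caller): emit the warning while still
# honoring an old keyword, returning its value when the caller passed it.
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)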
| 4 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe( grid: Matrix , row: int , column: int , n: int ) -> bool:
    """Return False if `n` already appears in the row, the column or the 3x3 block."""
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location( grid: Matrix ) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, if any."""
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku( grid: Matrix ) -> Matrix | None:
    """Fill the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            # Undo the tentative digit before trying the next one.
            grid[row][column] = 0
    return None
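# Quick sanity check of the solver (sketch): a copy of ``initial_grid`` must
# solve, while the deliberately contradictory ``no_solution`` must not.
#   assert sudoku([row[:] for row in initial_grid]) is not None
#   assert sudoku([row[:] for row in no_solution]) is None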
def print_solution( grid: Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 0 | 0 |
'''simple docstring'''
from __future__ import annotations
__author__ = '''Muhammad Umer Farooq'''
__license__ = '''MIT'''
__version__ = '''1.0.0'''
__maintainer__ = '''Muhammad Umer Farooq'''
__email__ = '''contact@muhammadumerfarooq.me'''
__status__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
    def __init__( self , domain: str ):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag( self , tag: str , attrs: list[tuple[str, str | None]] ):
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str ) -> str:
    """Return the registered domain, e.g. "https://a.b.c.d" -> "c.d"."""
    return ".".join(get_sub_domain_name(url ).split(""".""" )[-2:] )


def get_sub_domain_name(url: str ) -> str:
    """Return the full network location, e.g. "https://a.b.c.d" -> "a.b.c.d"."""
    return parse.urlparse(url ).netloc


def emails_from_url(url: str = "https://github.com" ) -> list[str]:
    """Crawl the page at `url` and return the e-mail addresses found on its links."""
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(url )
    try:
        # Open URL
        r = requests.get(url )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
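# Direct-call sketch (requires network access; example.com is just an assumed
# placeholder target):
#   found = emails_from_url("https://example.com")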
if __name__ == "__main__":
    emails = emails_from_url('''https://github.com''')
print(F'{len(emails)} emails found:')
print('''\n'''.join(sorted(emails)))
| 41 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''', '''negative_prompt''']
    required_optional_params = [
'''num_images_per_prompt''',
'''generator''',
'''num_inference_steps''',
'''latents''',
'''negative_prompt''',
'''guidance_scale''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
@property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
@property
    def block_out_channels_0( self ):
        """simple docstring"""
        return self.time_input_dim
@property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
        """simple docstring"""
        return 100
@property
    def dummy_tokenizer( self ):
        """simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
@property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
@property
    def dummy_image_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
        return model
@property
    def dummy_image_processor( self ):
        """simple docstring"""
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    def get_dummy_components( self ):
        """simple docstring"""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_prior( self ):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
@skip_mps
    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        test_max_difference = torch_device == '''cpu'''
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 0 | 0 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory( args: Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
IMPORT_ERROR_MESSAGE = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class ConvertCommand( BaseTransformersCLICommand ):
@staticmethod
    def register_subcommand( parser: ArgumentParser ):
        train_parser = parser.add_parser(
            """convert""" , help="""CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.""" , )
        train_parser.add_argument("""--model_type""" , type=str , required=True , help="""Model's type.""" )
        train_parser.add_argument(
            """--tf_checkpoint""" , type=str , required=True , help="""TensorFlow checkpoint path or folder.""" )
        train_parser.add_argument(
            """--pytorch_dump_output""" , type=str , required=True , help="""Path to the PyTorch saved model output.""" )
        train_parser.add_argument("""--config""" , type=str , default="""""" , help="""Configuration file path or folder.""" )
        train_parser.add_argument(
            """--finetuning_task_name""" , type=str , default=None , help="""Optional fine-tuning task name if the TF model was a finetuned model.""" , )
        train_parser.set_defaults(func=convert_command_factory )
    def __init__( self , model_type , tf_checkpoint , pytorch_dump_output , config , finetuning_task_name , *args , ):
        self._logger = logging.get_logger("""transformers-cli/converting""" )
        self._logger.info(f"""Loading model {model_type}""" )
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run( self ):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
if "ckpt" in self._tf_checkpoint.lower():
lowerCAmelCase_ :Optional[int] = self._tf_checkpoint
lowerCAmelCase_ :List[Any] = """"""
else:
lowerCAmelCase_ :Dict = self._tf_checkpoint
lowerCAmelCase_ :Union[str, Any] = """"""
convert_transfo_xl_checkpoint_to_pytorch(
__UpperCAmelCase , self._config , self._pytorch_dump_output , __UpperCAmelCase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                """--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]""" )
| 84 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum( number: int ) -> int:
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
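# e.g. digit_factorial_sum(145) == 1! + 4! + 5! == 1 + 24 + 120 == 145,
# the classic "factorion" fixed point of this map.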
def solution( chain_length: int = 60 , number_limit: int = 1_000_000 ) -> int:
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('''Parameters chain_length and number_limit must be int''' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
| 0 | 0 |
'''simple docstring'''
def solution( n: int = 100 ):
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""") | 163 |
def solution( n: int = 100 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
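# Worked check for n = 10: (1 + ... + 10)^2 = 55^2 = 3025 and
# 1^2 + ... + 10^2 = 385, so the difference is 3025 - 385 = 2640.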
if __name__ == "__main__":
print(f"""{solution() = }""")
| 0 | 0 |
def is_palindrome( num: int ) -> bool:
    """Return True if the decimal digits of `num` read the same in both directions."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    # Peel off the last digit of `num` and push it onto the reversed number.
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
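# e.g. is_palindrome(121) -> True, is_palindrome(123) -> False,
# is_palindrome(-121) -> False (the leading sign cannot mirror).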
if __name__ == "__main__":
import doctest
doctest.testmod()
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
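# With the _LazyModule indirection above, importing the package stays cheap:
# the heavy torch/TF submodules are loaded only when one of the exported names
# is first accessed, e.g. (sketch) `from <package> import GroupViTConfig`
# triggers the real import at attribute-access time.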
| 0 | 0 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    '''
    Apply the impedance triangle Z**2 = R**2 + X**2: pass the unknown quantity
    as 0 and the missing value is returned from the other two.
    '''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
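# Quick check with a 3-4-5 Pythagorean triple:
#   electrical_impedance(3, 4, 0) == {"impedance": 5.0}
#   electrical_impedance(0, 4, 5) == {"resistance": 3.0}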
| 348 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = ShapEPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''']
    required_optional_params = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
@property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
@property
    def time_embed_dim( self ):
        """simple docstring"""
        return self.time_input_dim * 4
@property
    def renderer_dim( self ):
        """simple docstring"""
        return 8
@property
    def dummy_tokenizer( self ):
        """simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
@property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
    def test_shap_e( self ):
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_batch_consistent( self ):
        """simple docstring"""
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
    def test_num_images_per_prompt( self ):
        """simple docstring"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_shap_e( self ):
        """simple docstring"""
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/shap_e/test_shap_e_np_out.npy''' )
        pipe = ShapEPipeline.from_pretrained('''openai/shap-e''' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            '''a shark''' , generator=generator , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 0 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=2_24 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
        '''simple docstring'''
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ) -> None:
        '''simple docstring'''
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """image_mean""" ) )
        self.assertTrue(hasattr(image_processor , """image_std""" ) )
        self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processor , """do_resize""" ) )
        self.assertTrue(hasattr(image_processor , """size""" ) )
    def test_batch_feature( self ):
        '''simple docstring'''
        pass
    def test_call_pil( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
    def test_call_numpy( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
    def test_call_pytorch( self ):
        '''simple docstring'''
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["""height"""],
                self.image_proc_tester.size["""width"""],
            ) , )
| 134 |
from __future__ import annotations
import time
import numpy as np
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm :
'''simple docstring'''
    def __init__( self , claim_vector: list[int] , allocated_resources_table: list[list[int]] , maximum_claim_table: list[list[int]] , ) -> None:
        """simple docstring"""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ) -> list[int]:
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
    def __available_resources( self ) -> list[int]:
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
    def __need( self ) -> list[list[int]]:
        """simple docstring"""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ) -> dict[int, list[int]]:
        """simple docstring"""
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self , **kwargs ) -> None:
        """simple docstring"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break
    def __pretty_data( self ):
"""simple docstring"""
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(__UpperCAmelCase ) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
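# Example run over the module-level tables (sketch; any truthy keyword argument
# asks main() to pretty-print the tables before the safety walk, so the
# `describe` name below is just an illustrative choice):
#   BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table).main(describe=True)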
if __name__ == "__main__":
import doctest
doctest.testmod()
| 0 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
'summarization': AutoModelForSeqaSeqLM,
'translation': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer (pl.LightningModule ):
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase="base" , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase , ) -> Tuple:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : int = Path(self.hparams.output_dir )
UpperCAmelCase_ : List[Any] = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCAmelCase_ : str = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
UpperCAmelCase_ : List[Any] = config
UpperCAmelCase_ : str = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), f"model config doesn't have a `{p}` attribute"
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
UpperCAmelCase_ : str = tokenizer
UpperCAmelCase_ : int = MODEL_MODES[mode]
if model is None:
UpperCAmelCase_ : Any = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
UpperCAmelCase_ : Dict = model
    def load_hf_checkpoint( self , *args , **kwargs ):
        self.model = self.model_type.from_pretrained(*args , **kwargs )
    def get_lr_scheduler( self ):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler
    def configure_optimizers( self ):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                'weight_decay': self.hparams.weight_decay,
            },
            {
                'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                'weight_decay': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step( self , batch , batch_nb ):
        return self.validation_step(batch , batch_nb )

    def test_epoch_end( self , outputs ):
        return self.validation_end(outputs )
    def total_steps( self ) -> int:
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup( self , stage ):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )
    def get_dataloader( self , type_path , batch_size , shuffle = False ):
        raise NotImplementedError('You must implement this for your task' )

    def train_dataloader( self ):
        return self.train_loader

    def val_dataloader( self ):
        return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=False )

    def test_dataloader( self ):
        return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=False )
    def _feature_file( self , mode ):
        return os.path.join(
            self.hparams.data_dir , 'cached_{}_{}_{}'.format(
                mode , list(filter(None , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
    def on_save_checkpoint( self , checkpoint: Dict[str, Any] ) -> None:
        save_path = self.output_dir.joinpath('best_tfmr' )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
@staticmethod
    def add_model_specific_args( parser , root_dir ):
        parser.add_argument(
            '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
        parser.add_argument(
            '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name' )
        parser.add_argument(
            '--tokenizer_name' , default=None , type=str , help='Pretrained tokenizer name or path if not the same as model_name' , )
        parser.add_argument(
            '--cache_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'cache' ) , type=str , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
        parser.add_argument(
            '--encoder_layerdrop' , type=float , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--decoder_layerdrop' , type=float , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--dropout' , type=float , help='Dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--attention_dropout' , type=float , help='Attention dropout probability (Optional). Goes into model.config' , )
        parser.add_argument('--learning_rate' , default=5E-5 , type=float , help='The initial learning rate for Adam.' )
        parser.add_argument(
            '--lr_scheduler' , default='linear' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='Learning rate scheduler' , )
        parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
        parser.add_argument('--adam_epsilon' , default=1E-8 , type=float , help='Epsilon for Adam optimizer.' )
        parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
        parser.add_argument('--num_workers' , default=4 , type=int , help='kwarg passed to DataLoader' )
        parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=int )
        parser.add_argument('--train_batch_size' , default=3_2 , type=int )
        parser.add_argument('--eval_batch_size' , default=3_2 , type=int )
        parser.add_argument('--adafactor' , action='store_true' )
class InitCallback (pl.Callback ):
'''simple docstring'''
    def on_sanity_check_start( self , trainer , pl_module ):
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback (pl.Callback ):
'''simple docstring'''
    def on_after_backward( self , trainer , pl_module ):
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
                print(name )
class LoggingCallback (pl.Callback ):
'''simple docstring'''
    def on_batch_end( self , trainer , pl_module ):
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def on_validation_end( self , trainer , pl_module ):
        rank_zero_info('***** Validation results *****' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )
    def on_test_end( self , trainer , pl_module ):
        rank_zero_info('***** Test results *****' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
        with open(output_test_results_file , 'w' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )
                    writer.write('{} = {}\n'.format(key , str(metrics[key] ) ) )
def add_generic_args( parser , root_dir ) -> None:
    parser.add_argument(
        '--output_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'model_checkpoints' ) , type=str , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O2' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=int )
    parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=float , help='Max gradient norm' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
    parser.add_argument(
        '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--seed' , type=int , default=42 , help='random seed for initialization' )
    parser.add_argument(
        '--data_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'dummy-train-data' ) , type=str , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def generic_train( model: BaseTransformer , args: argparse.Namespace , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ):
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 16
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print('RAG modeling tests with new set functions successfully executed!' )
    return trainer
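# Typical wiring of this module (sketch; MyTaskModule is a hypothetical
# BaseTransformer subclass implementing get_dataloader and the *_step hooks):
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = BaseTransformer.add_model_specific_args(parser, os.getcwd())
#   args = parser.parse_args()
#   trainer = generic_train(MyTaskModule(args), args)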
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCAmelCase__ = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # Rebuild the normalizer if the saved state disagrees with the requested options.
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
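# Usage sketch (downloads from the Hub on first call):
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tok("hello world")["input_ids"]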
| 0 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
__A : Tuple = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
__A : Dict = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
__A : int = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class _a ( datasets.Metric):
"""simple docstring"""
def lowercase__ ( self : List[Any] )->List[str]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def lowercase__ ( self : str , predictions : int , references : List[Any] )->Optional[int]:
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset , predictions=pred_dict )
return score
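# A minimal usage sketch, mirroring the docstring example above:
# squad_metric = datasets.load_metric("squad")
# predictions = [{"prediction_text": "1976", "id": "56e10a3be3433e1400422b22"}]
# references = [{"answers": {"answer_start": [97], "text": ["1976"]}, "id": "56e10a3be3433e1400422b22"}]
# squad_metric.compute(predictions=predictions, references=references)
# -> {'exact_match': 100.0, 'f1': 100.0}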
| 260 |
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube, e.g. perfect_cube(27) -> True."""
    # Round the floating-point cube root: 27 ** (1 / 3) is 3.0000000000000004 in CPython.
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
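# A small exact-arithmetic cross-check for perfect_cube; the helper name is
# illustrative and not part of the original snippet.
def perfect_cube_exact(n: int) -> bool:
    candidate = round(abs(n) ** (1 / 3))
    # scan a tiny neighbourhood to absorb any floating-point drift
    return any((candidate + d) ** 3 == abs(n) for d in (-1, 0, 1))

assert perfect_cube_exact(27) and not perfect_cube_exact(4)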
| 0 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polymonial() -> None:
    """Visualize the polynomial regression fit."""
    plt.scatter(X, y, color="""red""")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="""blue""")
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
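# A hedged sketch of reporting the prediction above (the variable name is an
# assumption, not from the original script):
predicted_salary = pol_reg.predict(poly_reg.fit_transform([[5.5]]))[0]
print(f"Predicted salary for level 5.5: {predicted_salary:.2f}")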
| 305 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
        dset = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
import faiss
        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase )
        dset = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
import faiss
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
import faiss
        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.float32 ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
        dset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(__UpperCAmelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
from elasticsearch import Elasticsearch
        dset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            mocked_index_create.return_value = {'''acknowledged''': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('''filename''' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(__UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(__UpperCAmelCase , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
import faiss
        index = FaissIndex(string_factory='''Flat''' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='''LSH''' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(__UpperCAmelCase ):
            index = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs :Dict ) -> Any:
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = '''index.faiss'''
    path = F"""mock://{index_name}"""
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'''acknowledged''': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['''foo''', '''bar''', '''foobar'''] )
            # single query
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = '''foo'''
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['''foo''', '''bar''', '''foobar''']
            mocked_search.return_value = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 32 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''t5'''
__snake_case = ['''past_key_values''']
__snake_case = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self : Optional[Any] , vocab_size : Optional[Any]=32_128 , d_model : List[Any]=512 , d_kv : Dict=64 , d_ff : Tuple=2_048 , num_layers : int=6 , num_decoder_layers : Optional[int]=None , num_heads : Optional[int]=8 , relative_attention_num_buckets : str=32 , relative_attention_max_distance : Tuple=128 , dropout_rate : Optional[Any]=0.1 , layer_norm_epsilon : int=1e-6 , initializer_factor : int=1.0 , feed_forward_proj : List[str]="relu" , is_encoder_decoder : int=True , use_cache : int=True , pad_token_id : List[Any]=0 , eos_token_id : int=1 , **__UpperCAmelCase : str , ) ->Optional[Any]:
"""simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **__UpperCAmelCase , )
class lowercase_ ( lowercase ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
"""simple docstring"""
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
return 13
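# Hedged sketch: with use_past disabled, the mapping built above reduces to
# four dynamic-axis entries (names follow the method body):
# {"input_ids": {0: "batch", 1: "encoder_sequence"},
#  "attention_mask": {0: "batch", 1: "encoder_sequence"},
#  "decoder_input_ids": {0: "batch", 1: "decoder_sequence"},
#  "decoder_attention_mask": {0: "batch", 1: "decoder_sequence"}}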
| 0 | 0 |
'''simple docstring'''
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    """Count n-digit positive integers that are also an nth power (Project Euler 63)."""
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F'''{solution(10, 22) = }''')
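# Hedged sanity check: 16807 = 7**5 is a five-digit fifth power, so it is
# counted by solution(); 2**10 = 1024 has only four digits and is not.
assert len(str(7**5)) == 5
assert len(str(2**10)) != 10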
| 4 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {}
class lowercase_ ( lowercase ):
'''simple docstring'''
__snake_case = '''llama'''
__snake_case = ['''past_key_values''']
    def __init__( self : Optional[Any] , vocab_size : Union[str, Any]=32_000 , hidden_size : str=4_096 , intermediate_size : int=11_008 , num_hidden_layers : Tuple=32 , num_attention_heads : Optional[int]=32 , num_key_value_heads : List[str]=None , hidden_act : Union[str, Any]="silu" , max_position_embeddings : Tuple=2_048 , initializer_range : Optional[Any]=0.02 , rms_norm_eps : Any=1e-6 , use_cache : Union[str, Any]=True , pad_token_id : Optional[int]=0 , bos_token_id : Optional[int]=1 , eos_token_id : Optional[int]=2 , pretraining_tp : Tuple=1 , tie_word_embeddings : List[str]=False , rope_scaling : Tuple=None , **__UpperCAmelCase : Tuple , ) ->str:
"""simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **__UpperCAmelCase , )
def __lowerCAmelCase ( self : Tuple ) ->Tuple:
"""simple docstring"""
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '''`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '''
                F"""got {self.rope_scaling}""" )
        rope_scaling_type = self.rope_scaling.get('''type''' , None )
        rope_scaling_factor = self.rope_scaling.get('''factor''' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}""" )
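# A minimal hedged usage sketch of the validation above (the class is the
# obfuscated LLaMA config; the dict shape follows _rope_scaling_validation):
# config = lowercase_(rope_scaling={'type': 'linear', 'factor': 2.0})   # passes
# config = lowercase_(rope_scaling={'type': 'unknown', 'factor': 2.0})  # ValueError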
| 0 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_A : Tuple =logging.get_logger(__name__)
_A : Optional[int] ={
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class _lowercase ( _lowercase ):
a = """van"""
    def __init__( self: List[str] , image_size: int=224 , num_channels: Tuple=3 , patch_sizes: Optional[Any]=[7, 3, 3, 3] , strides: Tuple=[4, 2, 2, 2] , hidden_sizes: Optional[Any]=[64, 128, 320, 512] , depths: List[str]=[3, 3, 12, 3] , mlp_ratios: Optional[int]=[8, 8, 4, 4] , hidden_act: Optional[int]="gelu" , initializer_range: Optional[Any]=0.02 , layer_norm_eps: Optional[Any]=1e-6 , layer_scale_init_value: str=1e-2 , drop_path_rate: Dict=0.0 , dropout_rate: Optional[Any]=0.0 , **UpperCamelCase__: List[Any] , ):
        super().__init__(**UpperCamelCase__ )
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 41 |
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "contact@muhammadumerfarooq.me"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : str ) ->None:
"""simple docstring"""
super().__init__()
        self.urls: list[str] = []
        self.domain = domain
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : list[tuple[str, str | None]] ) ->None:
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url :str ) -> str:
    return ".".join(get_sub_domain_name(url ).split('''.''' )[-2:] )
def get_sub_domain_name(url :str ) -> str:
    return parse.urlparse(url ).netloc
def emails_from_url(url :str = "https://github.com" ) -> list[str]:
    domain = get_domain_name(url )
    # Initialize the parser
    parser = Parser(domain )
try:
# Open URL
        r = requests.get(url )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
        valid_emails = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
            read = requests.get(link )
# Get the valid email.
            emails = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(a )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(7_70)
new_layer_name_dict = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
REMOTE_MODEL_PATHS = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser('~'), '.cache')
CACHE_DIR = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type : List[str] , use_small : Union[str, Any]=False ) -> List[Any]:
    '''simple docstring'''
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def _download(from_hf_hub : Any , file_name : Optional[Any] ) -> Tuple:
    '''simple docstring'''
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_hub , filename=file_name , local_dir=CACHE_DIR )
def _load_model(ckpt_path : Union[str, Any] , device : Optional[Any] , use_small : Optional[Any]=False , model_type : Optional[Any]="text" ) -> List[str]:
'''simple docstring'''
if model_type == "text":
lowerCAmelCase_ :List[Any] = BarkSemanticModel
lowerCAmelCase_ :int = BarkSemanticConfig
lowerCAmelCase_ :Optional[Any] = BarkSemanticGenerationConfig
elif model_type == "coarse":
lowerCAmelCase_ :Any = BarkCoarseModel
lowerCAmelCase_ :List[Any] = BarkCoarseConfig
lowerCAmelCase_ :Dict = BarkCoarseGenerationConfig
elif model_type == "fine":
lowerCAmelCase_ :List[str] = BarkFineModel
lowerCAmelCase_ :int = BarkFineConfig
lowerCAmelCase_ :Optional[Any] = BarkFineGenerationConfig
else:
raise NotImplementedError()
    model_key = f"""{model_type}_small""" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
        _download(model_info["""repo_id"""] , model_info["""file_name"""] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint["""model_args"""]
    if "input_vocab_size" not in model_args:
        model_args["""input_vocab_size"""] = model_args["""vocab_size"""]
        model_args["""output_vocab_size"""] = model_args["""vocab_size"""]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["""num_heads"""] = model_args.pop("""n_head""" )
    model_args["""hidden_size"""] = model_args.pop("""n_embd""" )
    model_args["""num_layers"""] = model_args.pop("""n_layer""" )
    model_config = ConfigClass(**checkpoint["""model_args"""] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["""model"""]
# fixup checkpoint
lowerCAmelCase_ :Union[str, Any] = """_orig_mod."""
for k, v in list(state_dict.items() ):
if k.startswith(lowercase__ ):
# replace part of the key with corresponding layer name in HF implementation
lowerCAmelCase_ :List[Any] = k[len(lowercase__ ) :]
for old_layer_name in new_layer_name_dict:
lowerCAmelCase_ :Any = new_k.replace(lowercase__ , new_layer_name_dict[old_layer_name] )
lowerCAmelCase_ :int = state_dict.pop(lowercase__ )
lowerCAmelCase_ :int = set(state_dict.keys() ) - set(model.state_dict().keys() )
lowerCAmelCase_ :Union[str, Any] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
lowerCAmelCase_ :Union[str, Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
lowerCAmelCase_ :List[Any] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
if len(lowercase__ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(lowercase__ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["""best_val_loss"""].item()
    logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(val_loss , 3 )} loss""" )
    model.eval()
    model.to(device )
del checkpoint, state_dict
return model
def load_model(pytorch_dump_folder_path : Any , use_small : List[str]=False , model_type : Dict="text" ) -> Any:
'''simple docstring'''
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
lowerCAmelCase_ :List[str] = """cpu""" # do conversion on cpu
lowerCAmelCase_ :Tuple = _get_ckpt_path(lowercase__ , use_small=lowercase__ )
lowerCAmelCase_ :Union[str, Any] = _load_model(lowercase__ , lowercase__ , model_type=lowercase__ , use_small=lowercase__ )
# load bark initial model
lowerCAmelCase_ :Optional[int] = _bark_load_model(lowercase__ , """cpu""" , model_type=lowercase__ , use_small=lowercase__ )
if model_type == "text":
        bark_model = bark_model["""model"""]
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
raise ValueError("""initial and new models don\'t have the same number of parameters""" )
# check if same output as the bark model
    batch_size = 5
    sequence_length = 1_0
    if model_type in ["text", "coarse"]:
        vec = torch.randint(2_5_6 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(2_5_6 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("""initial and new outputs don\'t have the same shape""" )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError("""initial and new outputs are not equal""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def _snake_case ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Dict , lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : Optional[int] , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = os.path.join(lowercase__ , lowercase__ )
lowerCAmelCase_ :Tuple = BarkSemanticConfig.from_pretrained(os.path.join(lowercase__ , """config.json""" ) )
lowerCAmelCase_ :List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowercase__ , """config.json""" ) )
lowerCAmelCase_ :str = BarkFineConfig.from_pretrained(os.path.join(lowercase__ , """config.json""" ) )
lowerCAmelCase_ :Tuple = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
lowerCAmelCase_ :Optional[Any] = BarkSemanticModel.from_pretrained(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = BarkCoarseModel.from_pretrained(lowercase__ )
lowerCAmelCase_ :Any = BarkFineModel.from_pretrained(lowercase__ )
lowerCAmelCase_ :Union[str, Any] = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
lowerCAmelCase_ :Optional[Any] = BarkConfig.from_sub_model_configs(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ :List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
lowerCAmelCase_ :Union[str, Any] = BarkModel(lowercase__ )
lowerCAmelCase_ :str = semantic
lowerCAmelCase_ :Union[str, Any] = coarseAcoustic
lowerCAmelCase_ :Optional[int] = fineAcoustic
lowerCAmelCase_ :int = codec
lowerCAmelCase_ :Optional[Any] = bark_generation_config
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
bark.save_pretrained(lowercase__ , repo_id=lowercase__ , push_to_hub=lowercase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 84 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> Optional[int]:
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir :Any ) -> Tuple:
    results = {}
    path = os.path.join(output_dir , '''all_results.json''' )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F"""can't find {path}""" )
    return results
def is_cuda_and_apex_available() -> int:
    is_using_cuda = torch.cuda.is_available() and torch_device == '''cuda'''
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase_ ( lowercase ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls : str ) ->Tuple:
"""simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , '''default_config.yml''' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __lowerCAmelCase ( cls : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertLess(result['''perplexity'''] , 100 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->int:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''translation_no_trainer''' ) ) )
@slow
def __lowerCAmelCase ( self : List[str] ) ->int:
"""simple docstring"""
        stream_handler = logging.StreamHandler(sys.stdout )
        logger.addHandler(stream_handler )
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(__UpperCAmelCase , '''image_classification_no_trainer''' ) ) )
| 0 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atomaa_masks(protein ):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    # NOTE: the obfuscated original collapsed the distinct atom14/atom37 names
    # into a single identifier; explicit 14/37 suffixes are restored below.
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 1_4 )
    restype_atom37_to_atom14_list.append([0] * 3_7 )
    restype_atom14_mask_list.append([0.0] * 1_4 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list , dtype=torch.int32 , device=protein["""aatype"""].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list , dtype=torch.float32 , device=protein["""aatype"""].device , )
    protein_aatype = protein["""aatype"""].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["""atom14_atom_exists"""] = residx_atom14_mask
    protein["""residx_atom14_to_atom37"""] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["""residx_atom37_to_atom14"""] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([2_1, 3_7] , dtype=torch.float32 , device=protein["""aatype"""].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["""atom37_atom_exists"""] = residx_atom37_mask
    return protein
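# Hedged usage sketch (a single 10-residue protein; key names match the dict
# entries populated above):
# protein = {"aatype": torch.zeros(10, dtype=torch.long)}
# protein = make_atomaa_masks(protein)
# protein["residx_atom14_to_atom37"].shape  # torch.Size([10, 14])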
def make_atomaa_masks_np(batch ):
    batch = tree_map(lambda n : torch.tensor(n , device=batch["""aatype"""].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t : np.array(t ) , make_atomaa_masks(batch ) )
    return out
| 163 |
import math
def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes over odd numbers; returns all primes below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(limit: int = 999_966_663_333 ) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 100
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
        current = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
        last_prime = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 0 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json',
}
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_a = """convnextv2"""
    def __init__( self , num_channels=3 , patch_size=4 , num_stages=4 , hidden_sizes=None , depths=None , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-12 , drop_path_rate=0.0 , image_size=224 , out_features=None , out_indices=None , **lowerCAmelCase , ) -> Union[str, Any]:
'''simple docstring'''
        super().__init__(**lowerCAmelCase )
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ['stem'] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
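# Minimal hedged usage of the config above (the class is the obfuscated
# ConvNeXT V2 config; defaults fill hidden_sizes/depths when None):
# config = __lowerCAmelCase(num_channels=3, patch_size=4)
# config.hidden_sizes  # -> [96, 192, 384, 768]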
| 205 |
def price_plus_tax(price: float , tax_rate: float ) -> float:
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_pix2struct''': [
'''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Pix2StructConfig''',
'''Pix2StructTextConfig''',
'''Pix2StructVisionConfig''',
],
'''processing_pix2struct''': ['''Pix2StructProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_pix2struct'''] = ['''Pix2StructImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_pix2struct'''] = [
'''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Pix2StructPreTrainedModel''',
'''Pix2StructForConditionalGeneration''',
'''Pix2StructVisionModel''',
'''Pix2StructTextModel''',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 348 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
def __lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : str ) ->Union[str, Any]:
"""simple docstring"""
        model_pt = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] ) ->List[str]:
"""simple docstring"""
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=__UpperCAmelCase )
model_tf.save_pretrained(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->int:
"""simple docstring"""
        mock_framework = '''mock_framework'''
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
def __lowerCAmelCase ( self : str ) ->int:
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(__UpperCAmelCase ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
        mock_tf_available = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=__UpperCAmelCase )
        mock_torch_available = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
        self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=__UpperCAmelCase )
        mock_torch_available = MagicMock(return_value=__UpperCAmelCase )
        with patch('''transformers.onnx.features.is_tf_available''' , mock_tf_available ), patch(
            '''transformers.onnx.features.is_torch_available''' , mock_torch_available ):
            with self.assertRaises(__UpperCAmelCase ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 0 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Dict = {
'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'canine'
    def __init__( self : List[str] , hidden_size : List[Any]=7_68 , num_hidden_layers : Optional[int]=12 , num_attention_heads : List[str]=12 , intermediate_size : Dict=30_72 , hidden_act : Dict="gelu" , hidden_dropout_prob : Tuple=0.1 , attention_probs_dropout_prob : str=0.1 , max_position_embeddings : List[Any]=1_63_84 , type_vocab_size : int=16 , initializer_range : List[str]=0.02 , layer_norm_eps : Union[str, Any]=1e-12 , pad_token_id : Tuple=0 , bos_token_id : Any=0xE000 , eos_token_id : List[Any]=0xE001 , downsampling_rate : str=4 , upsampling_kernel_size : Any=4 , num_hash_functions : int=8 , num_hash_buckets : Optional[int]=1_63_84 , local_transformer_stride : List[str]=1_28 , **lowerCAmelCase_ : Union[str, Any] , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase )
A__ : Optional[int] =max_position_embeddings
A__ : Tuple =hidden_size
A__ : List[Any] =num_hidden_layers
A__ : str =num_attention_heads
A__ : Tuple =intermediate_size
A__ : Optional[Any] =hidden_act
A__ : str =hidden_dropout_prob
A__ : int =attention_probs_dropout_prob
A__ : List[str] =initializer_range
A__ : List[Any] =type_vocab_size
A__ : Optional[Any] =layer_norm_eps
# Character config:
A__ : Optional[Any] =downsampling_rate
A__ : Union[str, Any] =upsampling_kernel_size
A__ : Tuple =num_hash_functions
A__ : Optional[int] =num_hash_buckets
A__ : List[str] =local_transformer_stride
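# Usage sketch (illustrative, not part of the original file): the class
# follows the standard PretrainedConfig pattern, e.g.
#     config = CanineConfig()            # google/canine-s defaults
#     config.save_pretrained('./canine-config')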
| 134 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ) ->Optional[Any]:
        """simple docstring"""
        super().setUp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ) ->Dict:
        """simple docstring"""
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ) ->Optional[Any]:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''' )
        self.assertListEqual(tokens , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_chinese( self ) ->Any:
        """simple docstring"""
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
    def test_basic_tokenizer_lower( self ) ->int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_false( self ) ->Optional[int]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
    def test_basic_tokenizer_lower_strip_accents_true( self ) ->str:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_lower_strip_accents_default( self ) ->Dict:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
    def test_basic_tokenizer_no_lower( self ) ->Optional[Any]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ) ->Tuple:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ) ->Optional[int]:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_basic_tokenizer_respects_never_split_tokens( self ) ->int:
        """simple docstring"""
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=['''[UNK]'''] )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
    def test_wordpiece_tokenizer( self ) ->int:
        """simple docstring"""
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token='''[UNK]''' )
        self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
        self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
        self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
    def test_prepare_batch( self ) ->int:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
        batch = tokenizer(src_text , padding=True , return_tensors='''pt''' )
        self.assertIsInstance(batch , BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 9) , batch.input_ids.shape )
        self.assertEqual((2, 9) , batch.attention_mask.shape )
    def test_is_whitespace( self ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
    def test_is_control( self ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
    def test_is_punctuation( self ) ->List[str]:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
    def test_sequence_builders( self ) ->List[str]:
        """simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
| 0 | 0 |
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/tapas-base-finetuned-sqa': (
'https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'
),
'google/tapas-base-finetuned-wtq': (
'https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'
),
'google/tapas-base-finetuned-wikisql-supervised': (
'https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'
),
'google/tapas-base-finetuned-tabfact': (
'https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'
),
}
class TapasConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'tapas'
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_024 , type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10] , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , positive_label_weight=10.0 , num_aggregation_labels=0 , aggregation_loss_weight=1.0 , use_answer_as_supervision=None , answer_loss_importance=1.0 , use_normalized_answer_loss=False , huber_loss_delta=None , temperature=1.0 , aggregation_temperature=1.0 , use_gumbel_for_cells=False , use_gumbel_for_aggregation=False , average_approximation_function="ratio" , cell_selection_preference=None , answer_loss_cutoff=None , max_num_rows=64 , max_num_columns=32 , average_logits_per_cell=False , select_one_column=True , allow_empty_column_selection=False , init_cell_selection_weights_to_zero=False , reset_position_index_per_cell=True , disable_per_token_loss=False , aggregation_labels=None , no_aggregation_label_index=None , **kwargs , ) -> Union[str, Any]:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        if isinstance(self.aggregation_labels , dict ):
            self.aggregation_labels = {int(k ): v for k, v in aggregation_labels.items()}
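# Usage sketch (illustrative, not part of the original file): a WTQ-style
# fine-tuning config selects among four aggregation operators, e.g.
#     config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)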
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
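# Note (reading aid): nothing listed in ``_import_structure`` is imported until
# first attribute access; ``_LazyModule`` resolves submodules on demand, which
# keeps importing the package cheap even with heavy optional backends.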
| 0 | 0 |
"""simple docstring"""
def greatest_common_divisor ( x : int , y : int ):
    '''simple docstring'''
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm ( x : int , y : int ):
    '''simple docstring'''
    return (x * y) // greatest_common_divisor(x , y )
def solution ( n : int = 20 ):
    '''simple docstring'''
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
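    # Minimal sanity checks (a sketch): 2520 is the smallest number evenly
    # divisible by every integer from 1 to 10, per the problem statement.
    assert greatest_common_divisor(121 , 11 ) == 11
    assert lcm(4 , 6 ) == 12
    assert solution(10 ) == 2_520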
| 260 |
def gray_code ( bit_count :int ) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError('''The given input must be positive''' )
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count )
    #
    # convert them to integers
    for i in range(len(sequence ) ):
        sequence[i] = int(sequence[i] , 2 )
    return sequence
def gray_code_sequence_string ( bit_count :int ) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1 )
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2 ):
        generated_no = '''0''' + smaller_sequence[i]
        sequence.append(generated_no )
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2 ) ):
        generated_no = '''1''' + smaller_sequence[i]
        sequence.append(generated_no )
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
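    # Example (a sketch): consecutive reflected Gray codes differ by one bit.
    print(gray_code(2 ))  # [0, 1, 3, 2]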
| 0 | 0 |
def sylvester ( number : int ) -> int:
    """simple docstring"""
    assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
    if number == 1:
        return 2
    elif number < 1:
        msg = f'''The input value of [n={number}] has to be > 0'''
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
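    # First terms follow a(n) = a(n-1)^2 - a(n-1) + 1 (a quick check):
    assert [sylvester(i ) for i in range(1 , 5 )] == [2, 3, 7, 43]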
| 305 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) ->Tuple:
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ) ->int:
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ) ->Union[str, Any]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ) ->Any:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ) ->Dict:
        """simple docstring"""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ) ->Tuple:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_save_load_pretrained_additional_features( self ) ->Dict:
        """simple docstring"""
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )
    def test_image_processor( self ) ->Dict:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ) ->str:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ) ->List[str]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()
    def test_tokenizer_decode( self ) ->List[str]:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names( self ) ->Dict:
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 0 | 0 |
def nand_gate ( input_1 : int , input_2 : int ) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate ( ) -> None:
    """simple docstring"""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 32 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings ( idx :int ) -> list:
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention ( idx :int , cnt :int ) -> list:
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token ( idx :int ) -> list:
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", '''stage2.cls_token''') )
return token
def final ( ) -> list:
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint ( cvt_model :str , image_size :int , cvt_file_name :str , pytorch_dump_folder :str ) -> None:
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1_000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
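# Note (reading aid, not in the original script): the checkpoint name encodes
# the depth variant -- ``cvt-13``, ``cvt-21`` or the wide ``cvt-w24`` -- and
# the ``rsplit`` check above selects the matching depth configuration.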
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 0 | 0 |
'''simple docstring'''
from math import isclose, sqrt
def next_point ( point_x : float , point_y : float , incoming_gradient : float ):
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus , point_x ) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient
def solution ( first_x_coord : float = 1.4 , first_y_coord : float = -9.6 ):
    num_reflections = 0
    point_x = first_x_coord
    point_y = first_y_coord
    gradient = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x , point_y , gradient = next_point(point_x , point_y , gradient )
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
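    # Geometry check (a sketch): every point returned by next_point lies on
    # the ellipse 4x^2 + y^2 = 100, up to floating-point error.
    x, y, _ = next_point(1.4 , -9.6 , (10.1 + 9.6) / (0.0 - 1.4) )
    assert isclose(4 * x * x + y * y , 100.0 , abs_tol=1e-6 )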
| 4 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe ( grid :Matrix , row :int , column :int , n :int ) -> bool:
    for i in range(9 ):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3 ):
        for j in range(3 ):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location ( grid :Matrix ) -> tuple[int, int] | None:
    for i in range(9 ):
        for j in range(9 ):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku ( grid :Matrix ) -> Matrix | None:
    if location := find_empty_location(grid ):
        row , column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1 , 10 ):
        if is_safe(grid , row , column , digit ):
            grid[row][column] = digit
            if sudoku(grid ) is not None:
                return grid
            grid[row][column] = 0
    return None
def print_solution ( grid :Matrix ) -> None:
    for row in grid:
        for cell in row:
            print(cell , end=''' ''' )
        print()
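# How the backtracking works (reading aid): ``sudoku`` locates the first empty
# cell, tries each digit that passes the row/column/3x3-box checks in
# ``is_safe``, recurses, and resets the cell to 0 when a branch dead-ends.
# The grid is mutated in place, so pass a copy if the input must be preserved.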
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 0 | 0 |
'''simple docstring'''
from torch import nn
def get_activation (act_fn: str ) -> nn.Module:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
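# Usage sketch (illustrative, assumes torch is installed):
#
#     import torch
#     act = get_activation('silu')
#     out = act(torch.randn(2, 4))   # elementwise SiLU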
| 41 |
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = KandinskyV22PriorPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''', '''negative_prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''generator''',
        '''num_inference_steps''',
        '''latents''',
        '''negative_prompt''',
        '''guidance_scale''',
        '''output_type''',
        '''return_dict''',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
    def time_input_dim( self ) ->Any:
"""simple docstring"""
return 32
@property
    def block_out_channels_0( self ) ->List[str]:
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim( self ) ->Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ) ->List[Any]:
"""simple docstring"""
return 100
@property
    def dummy_tokenizer( self ) ->str:
        """simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
@property
    def dummy_text_encoder( self ) ->str:
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self ) ->Optional[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
@property
    def dummy_image_encoder( self ) ->List[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
        return model
@property
    def dummy_image_processor( self ) ->int:
        """simple docstring"""
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    def get_dummy_components( self ) ->Optional[Any]:
        """simple docstring"""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) ->int:
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_prior( self ) ->Tuple:
        """simple docstring"""
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical( self ) ->Optional[Any]:
        """simple docstring"""
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
    @skip_mps
    def test_attention_slicing_forward_pass( self ) ->Union[str, Any]:
        """simple docstring"""
        test_max_difference = torch_device == '''cpu'''
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config( self ) -> Any:
        return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
    def prepare_config_and_inputs( self ) -> str:
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config( self ) -> Tuple:
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
    def prepare_config_and_inputs_for_common( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
UpperCAmelCase_ :Union[str, Any] = False
UpperCAmelCase_ :int = False
UpperCAmelCase_ :Optional[Any] = False
    def setUp( self ) -> int:
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config( self ) -> int:
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained( self ) -> Optional[int]:
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
    def test_resize_token_embeddings( self ) -> int:
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest ( unittest.TestCase ):
@slow
    def test_lm_generate_xglm( self , verify_outputs=True ) -> Optional[int]:
        model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        input_ids = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample( self ) -> str:
        tokenizer = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
        model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        tf.random.set_seed(0 )
        tokenized = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(""":/CPU:0""" ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        expected_output_str = (
            """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
        )
        self.assertEqual(output_str , expected_output_str )
@slow
    def test_batch_generation( self ) -> Any:
        model = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
        tokenizer = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
        tokenizer.padding_side = """left"""
        # use different length sentences to test batching
        sentences = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When""",
            """Hello, my dog is a little""",
        ]
        inputs = tokenizer(sentences , return_tensors="""tf""" , padding=True )
        input_ids = inputs["""input_ids"""]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            """This is an extremelly long sentence that only exists to test the ability of the model to cope with """
            """left-padding, such as in batched generation. The output for the sequence below should be the same """
            """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
            """a single""",
            """Hello, my dog is a little bit of a shy one, but he is very friendly""",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
| 84 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}
def digit_factorial_sum ( number :int ) -> int:
    if not isinstance(number , int ):
        raise TypeError('''Parameter number must be int''' )
    if number < 0:
        raise ValueError('''Parameter number must be greater than or equal to 0''' )
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number ) )
def solution ( chain_length :int = 60 , number_limit :int = 1_000_000 ) -> int:
    if not isinstance(chain_length , int ) or not isinstance(number_limit , int ):
        raise TypeError('''Parameters chain_length and number_limit must be int''' )
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            '''Parameters chain_length and number_limit must be greater than 0''' )
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}
    for start_chain_element in range(1 , number_limit ):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element )
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element )
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution()}""")
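    # Spot check (a sketch): 145 is a fixed point, since 1! + 4! + 5! = 145.
    assert digit_factorial_sum(145 ) == 145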
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 163 |
def solution ( n :int = 100 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f"""{solution() = }""")
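    # Cross-check the closed forms against a brute-force computation (sketch):
    assert solution(10 ) == sum(range(1 , 11 ) ) ** 2 - sum(i * i for i in range(1 , 11 ) )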
| 0 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = 'bert-base-cased'
PEGASUS_XSUM = 'google/pegasus-xsum'
ARTICLES = [' Sam ate lunch today.', 'Sams lunch ingredients.']
SUMMARIES = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
MARIAN_TINY = 'sshleifer/tiny-marian-en-de'
def _dump_articles(path: Path , articles: list ) -> None:
    """simple docstring"""
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
def make_test_data_dir( tmp_dir : Dict ) -> Dict:
    """simple docstring"""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , F'''{split}.source''' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , F'''{split}.target''' ) , SUMMARIES )
    return tmp_dir
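# Note: the fixture above writes paired {split}.source / {split}.target files
# for the train/val/test splits, which is the directory layout SeqaSeqDataset
# expects in the tests below.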
class __lowerCAmelCase ( TestCasePlus ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A__ ( self , lowerCAmelCase ) -> Tuple:
'''simple docstring'''
_lowercase =AutoTokenizer.from_pretrained(__UpperCAmelCase )
_lowercase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowercase =max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES )
_lowercase =max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES )
_lowercase =4
_lowercase =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_lowercase , _lowercase ='ro_RO', 'de_DE' # ignored for all but mbart, but never causes error.
_lowercase =SeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='train' , max_source_length=__UpperCAmelCase , max_target_length=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , )
_lowercase =DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_lowercase =shift_tokens_right(batch['labels'] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A__ ( self , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowercase =AutoTokenizer.from_pretrained(__UpperCAmelCase )
_lowercase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowercase =max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in ARTICLES )
_lowercase =max(len(tokenizer.encode(__UpperCAmelCase ) ) for a in SUMMARIES )
_lowercase =4
_lowercase =LegacySeqaSeqDataset(
__UpperCAmelCase , data_dir=__UpperCAmelCase , type_path='train' , max_source_length=20 , max_target_length=__UpperCAmelCase , )
_lowercase =DataLoader(__UpperCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A__ ( self ) -> Tuple:
'''simple docstring'''
_lowercase =AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' )
_lowercase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_lowercase =tmp_dir.joinpath('train.source' ).open().readlines()
_lowercase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__UpperCAmelCase , __UpperCAmelCase , 128 , __UpperCAmelCase )
_lowercase ={x.name for x in tmp_dir.iterdir()}
_lowercase ={x.name for x in save_dir.iterdir()}
_lowercase =save_dir.joinpath('train.source' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__UpperCAmelCase ) < len(__UpperCAmelCase )
assert len(__UpperCAmelCase ) == 1
assert len(packed_examples[0] ) == sum(len(__UpperCAmelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
if not FAIRSEQ_AVAILABLE:
return
_lowercase , _lowercase , _lowercase =self._get_dataset(max_len=64 )
_lowercase =64
_lowercase =ds.make_dynamic_sampler(__UpperCAmelCase , required_batch_size_multiple=__UpperCAmelCase )
_lowercase =[len(__UpperCAmelCase ) for x in batch_sampler]
assert len(set(__UpperCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__UpperCAmelCase ) == len(__UpperCAmelCase ) # no dropped or added examples
_lowercase =DataLoader(__UpperCAmelCase , batch_sampler=__UpperCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
_lowercase =[]
_lowercase =[]
for batch in data_loader:
_lowercase =batch['input_ids'].shape
_lowercase =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_lowercase =np.product(batch['input_ids'].shape )
num_src_per_batch.append(__UpperCAmelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__UpperCAmelCase )
assert num_src_per_batch[0] == max(__UpperCAmelCase )
if failures:
raise AssertionError(F'''too many tokens in {len(__UpperCAmelCase )} batches''' )
def A__ ( self ) -> List[str]:
'''simple docstring'''
        ds , _ , tokenizer = self._get_dataset(max_len=512 )
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False )
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 )
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler )
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k="input_ids" ):
            return [batch[k].eq(pad ).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k='''labels''' ) ) < sum(count_pad_tokens(naive_dl , k='''labels''' ) )
        assert sum(count_pad_tokens(sortish_dl ) ) < sum(count_pad_tokens(naive_dl ) )
        assert len(sortish_dl ) == len(naive_dl )
    def _get_dataset( self , n_obs=1_000 , max_len=128 ) -> Any:
        '''simple docstring'''
        if os.getenv('USE_REAL_DATA' , False ):
            data_dir ='examples/seq2seq/wmt_en_ro'
            max_tokens = max_len * 2 * 64
            if not Path(data_dir ).joinpath('train.len' ).exists():
                save_len_file(MARIAN_TINY , data_dir )
        else:
            data_dir ='examples/seq2seq/test_data/wmt_en_ro'
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir )
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY )
        ds = SeqaSeqDataset(
            tokenizer , data_dir=data_dir , type_path='train' , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
_lowercase , _lowercase , _lowercase =self._get_dataset()
_lowercase =set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__UpperCAmelCase ) )
_lowercase =set(DistributedSortishSampler(__UpperCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__UpperCAmelCase ) )
assert idsa.intersection(__UpperCAmelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A__ ( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
_lowercase =AutoTokenizer.from_pretrained(__UpperCAmelCase , use_fast=__UpperCAmelCase )
if tok_name == MBART_TINY:
_lowercase =SeqaSeqDataset(
__UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , )
_lowercase =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_lowercase =SeqaSeqDataset(
__UpperCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , )
_lowercase =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__UpperCAmelCase ) == 1 if tok_name == BART_TINY else len(__UpperCAmelCase ) == 0
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 | 0 |
def multiplication_table( number : int , number_of_terms : int )-> str:
    '''simple docstring'''
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
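# Expected output for the call above (illustrative): ten lines from
# "5 * 1 = 5" through "5 * 10 = 50", one product per line.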
| 348 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = ShapEPipeline
    params = ['''prompt''']
    batch_params = ['''prompt''']
    required_optional_params = [
        '''num_images_per_prompt''',
        '''num_inference_steps''',
        '''generator''',
        '''latents''',
        '''guidance_scale''',
        '''frame_size''',
        '''output_type''',
        '''return_dict''',
    ]
__snake_case = False
@property
    def text_embedder_hidden_size( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
    def time_input_dim( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
return 32
@property
    def time_embed_dim( self : Any ) ->Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
    def renderer_dim( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
return 8
@property
    def dummy_tokenizer( self : Tuple ) ->str:
        """simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer
@property
    def dummy_text_encoder( self : Union[str, Any] ) ->List[Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self : Dict ) ->Union[str, Any]:
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 16,
            '''embedding_dim''': self.time_input_dim,
            '''num_embeddings''': 32,
            '''embedding_proj_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''num_layers''': 1,
            '''clip_embed_dim''': self.time_input_dim * 2,
            '''additional_embeddings''': 0,
            '''time_embed_act_fn''': '''gelu''',
            '''norm_in_type''': '''layer''',
            '''encoder_hid_proj_type''': None,
            '''added_emb_type''': None,
        }
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer( self : List[Any] ) ->List[str]:
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            '''param_shapes''': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            '''d_latent''': self.time_input_dim,
            '''d_hidden''': self.renderer_dim,
            '''n_output''': 12,
            '''background''': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self : List[Any] ) ->Any:
        """simple docstring"""
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
            '''prior''': prior,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''renderer''': renderer,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs( self : Tuple , device : Optional[Any] , seed : int=0 ) ->Optional[int]:
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''num_inference_steps''': 1,
            '''frame_size''': 32,
            '''output_type''': '''np''',
        }
        return inputs
def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def __lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = 1
a = 2
a = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self : int ) ->Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
a = ShapEPipeline.from_pretrained('''openai/shap-e''' )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
a = pipe(
'''a shark''' , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
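# Note: the slow test above renders 20 frames per prompt (shape (20, 64, 64, 3))
# and compares them to reference frames hosted in the hf-internal-testing
# dataset repo via assert_mean_pixel_difference.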
| 0 | 0 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class lowerCamelCase ( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self : List[Any] , unet : Optional[int] , scheduler : Union[str, Any] ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    def __call__( self : Tuple ) -> List[Any]:
        '''simple docstring'''
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        timestep = 1
        model_output = self.unet(noise , timestep ).sample
        scheduler_output = self.scheduler.step(model_output , timestep , noise ).prev_sample
        # the subtraction cancels the scheduler term, so the pipeline
        # deliberately returns a tensor of ones with the output's shape
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output )
        return result
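# Usage sketch (illustrative names, not from the original snippet):
#   from diffusers import UNet2DModel, DDPMScheduler
#   pipe = lowerCamelCase(unet=UNet2DModel(sample_size=8), scheduler=DDPMScheduler())
#   ones = pipe()  # tensor of ones shaped like the scheduler output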
| 134 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowercase_ :
    '''simple docstring'''
    def __init__( self : Optional[int] , claim_vector : list[int] , allocated_resources_table : list[list[int]] , maximum_claim_table : list[list[int]] , ) ->None:
        """simple docstring"""
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self : Any ) ->list[int]:
        """simple docstring"""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self : Optional[int] ) ->list[int]:
        """simple docstring"""
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self : Union[str, Any] ) ->list[list[int]]:
        """simple docstring"""
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self : Tuple ) ->dict[int, list[int]]:
        """simple docstring"""
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self : Optional[Any] , **kwargs : Any ) ->None:
        """simple docstring"""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""" )
                    # remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break
    def __pretty_data( self : List[Any] ) ->None:
        """simple docstring"""
        print(''' ''' * 9 + '''Allocated Resource Table''' )
        for item in self.__allocated_resources_table:
            print(
                F"""P{self.__allocated_resources_table.index(item ) + 1}"""
                + ''' '''.join(F"""{it:>8}""" for it in item )
                + '''\n''' )
        print(''' ''' * 9 + '''System Resource Table''' )
        for item in self.__maximum_claim_table:
            print(
                F"""P{self.__maximum_claim_table.index(item ) + 1}"""
                + ''' '''.join(F"""{it:>8}""" for it in item )
                + '''\n''' )
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x ) for x in self.__claim_vector ) )
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
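# Usage sketch (illustrative) with the module-level test data above:
#   lowercase_(test_claim_vector, test_allocated_res_table,
#              test_maximum_claim_table).main(describe=True)
# prints the resource tables and then executes processes in a safe order,
# or aborts if the system reaches an unsafe state.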
| 0 | 0 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__UpperCAmelCase = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__UpperCAmelCase = concatenate_datasets
__UpperCAmelCase = DownloadConfig
__UpperCAmelCase = DownloadManager
__UpperCAmelCase = DownloadMode
__UpperCAmelCase = DownloadConfig
__UpperCAmelCase = DownloadMode
__UpperCAmelCase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
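# Quick usage sketch for the re-exported API (illustrative, not part of this
# __init__): `load_dataset('squad', split='train')` returns a Dataset, and
# `load_from_disk(path)` restores one saved with Dataset.save_to_disk.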
| 29 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCAmelCase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
),
"google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"google/electra-small-generator": (
"https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
),
"google/electra-base-generator": (
"https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
),
"google/electra-large-generator": (
"https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
),
"google/electra-small-discriminator": (
"https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
),
"google/electra-base-discriminator": (
"https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
),
"google/electra-large-discriminator": (
"https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase__ = {
"google/electra-small-generator": 512,
"google/electra-base-generator": 512,
"google/electra-large-generator": 512,
"google/electra-small-discriminator": 512,
"google/electra-base-discriminator": 512,
"google/electra-large-discriminator": 512,
}
UpperCAmelCase__ = {
"google/electra-small-generator": {"do_lower_case": True},
"google/electra-base-generator": {"do_lower_case": True},
"google/electra-large-generator": {"do_lower_case": True},
"google/electra-small-discriminator": {"do_lower_case": True},
"google/electra-base-discriminator": {"do_lower_case": True},
"google/electra-large-discriminator": {"do_lower_case": True},
}
class lowercase_ ( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__( self : Dict , vocab_file : int=None , tokenizer_file : str=None , do_lower_case : Optional[int]=True , unk_token : str="[UNK]" , sep_token : Any="[SEP]" , pad_token : str="[PAD]" , cls_token : Optional[Any]="[CLS]" , mask_token : Union[str, Any]="[MASK]" , tokenize_chinese_chars : List[str]=True , strip_accents : Tuple=None , **kwargs : Optional[int] , ) ->str:
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : List[Any] , token_ids_0 : Optional[int] , token_ids_1 : Tuple=None ) ->str:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) ->List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
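# Illustrative behavior of the helpers above (BERT-style special tokens):
# a single sequence becomes [CLS] A [SEP]; a pair becomes [CLS] A [SEP] B [SEP],
# with token_type_ids of 0s for the first segment and 1s for the second.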
| 0 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _a ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self : Any )->List[Any]:
_UpperCAmelCase = '''hf-internal-testing/tiny-random-t5'''
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCAmelCase )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
_UpperCAmelCase = tokenizer('''This is me''' , return_tensors='''pt''' )
_UpperCAmelCase = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
_UpperCAmelCase = model.generate(**__UpperCAmelCase )
_UpperCAmelCase = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCAmelCase )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
_UpperCAmelCase = model_reloaded.generate(**__UpperCAmelCase )
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase ) )
def lowercase__ ( self : List[Any] )->List[Any]:
_UpperCAmelCase = '''hf-internal-testing/tiny-random-t5'''
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase )
_UpperCAmelCase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__UpperCAmelCase ):
model.save_pretrained(__UpperCAmelCase )
_UpperCAmelCase = model.reverse_bettertransformer()
model.save_pretrained(__UpperCAmelCase )
| 260 |
def perfect_cube ( n :int ) -> bool:
    # round the float cube root so exact cubes (e.g. 27) are not misclassified
    # by floating-point error
    a = round(n ** (1 / 3) )
    return (a * a * a) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
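# Why the rounding matters (illustrative): in CPython 27 ** (1 / 3) is not
# exactly 3.0, so an exact equality test on the raw float root would wrongly
# report perfect_cube(27) as False.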
| 0 | 0 |
def price_plus_tax ( price : float , tax_rate : float ) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F'{price_plus_tax(1_0_0, 0.25) = }')
print(F'{price_plus_tax(125.50, 0.05) = }')
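# Worked results for the calls above: 100 * (1 + 0.25) = 125.0 and
# 125.50 * (1 + 0.05) = 131.775.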
| 305 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ = pytest.mark.integration
@require_faiss
class lowercase_ ( TestCase ):
'''simple docstring'''
    def _create_dummy_dataset( self : Tuple ) ->Dataset:
        """simple docstring"""
        dset = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
a = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
a , a = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(__UpperCAmelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
from elasticsearch import Elasticsearch
a = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
a = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=__UpperCAmelCase )
a , a = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowercase_ ( TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
a = np.eye(5 , dtype=np.floataa )[::-1]
a , a = index.search_batch(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search_batch , queries[0] )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
import faiss
a = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
a = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__UpperCAmelCase ):
a = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = faiss.IndexFlat(5 )
a = FaissIndex(custom_index=__UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
a = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def _a ( a :Dict ) -> Any:
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
a = '''index.faiss'''
a = F"""mock://{index_name}"""
index.save(a , storage_options=mockfs.storage_options )
a = FaissIndex.load(a , storage_options=mockfs.storage_options )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(a )
assert scores[0] > 0
assert indices[0] == 1
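# Note: the test above appears to exercise FaissIndex.save/load through an
# fsspec mock filesystem (the "mock://" URL), with storage_options forwarded
# to the underlying filesystem.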
@require_elasticsearch
class lowercase_ ( TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = Elasticsearch()
a = {'''acknowledged''': True}
a = ElasticSearchIndex(es_client=__UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
# batched queries with timeout
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase , request_timeout=30 )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
| 0 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''latents''',
        '''num_images_per_prompt''',
        '''callback''',
        '''callback_steps''',
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ : Optional[Any] = False
    def get_dummy_components( self : Union[str, Any] ) -> Tuple:
        torch.manual_seed(0 )
        transformer = TransformeraDModel(
            sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='gelu-approximate' , num_embeds_ada_norm=1_0_0_0 , norm_type='ada_norm_zero' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
        return components
    def get_dummy_inputs( self : List[Any] , device : List[Any] , seed : Optional[Any]=0 ) -> Any:
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'class_labels': [1],
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
a_ : List[Any] = 'cpu'
a_ : List[Any] = self.get_dummy_components()
a_ : Optional[Any] = self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a_ : str = self.get_dummy_inputs(__UpperCAmelCase )
a_ : Dict = pipe(**__UpperCAmelCase ).images
a_ : int = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 1_6, 1_6, 3) )
a_ : str = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
a_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase , 1E-3 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
self._test_inference_batch_single_identical(relax_max_difference=__UpperCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def tearDown( self : List[Any] ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
a_ : List[Any] = torch.manual_seed(0 )
a_ : List[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
a_ : Union[str, Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
a_ : Any = pipe.get_label_ids(__UpperCAmelCase )
a_ : List[str] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=4_0 , output_type='np' ).images
for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ):
a_ : Union[str, Any] = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
a_ : str = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
a_ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
a_ : Dict = ['vase', 'umbrella']
a_ : int = pipe.get_label_ids(__UpperCAmelCase )
a_ : List[str] = torch.manual_seed(0 )
a_ : List[str] = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2_5 , output_type='np' ).images
for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ):
a_ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 32 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class lowercase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''t5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
    def __init__( self : Optional[Any] , vocab_size : Optional[Any]=32_128 , d_model : List[Any]=512 , d_kv : Dict=64 , d_ff : Tuple=2_048 , num_layers : int=6 , num_decoder_layers : Optional[int]=None , num_heads : Optional[int]=8 , relative_attention_num_buckets : str=32 , relative_attention_max_distance : Tuple=128 , dropout_rate : Optional[Any]=0.1 , layer_norm_epsilon : int=1e-6 , initializer_factor : int=1.0 , feed_forward_proj : List[str]="relu" , is_encoder_decoder : int=True , use_cache : int=True , pad_token_id : List[Any]=0 , eos_token_id : int=1 , **kwargs : str , ) ->Optional[Any]:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split('''-''' )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''' )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class lowercase_ ( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
    def inputs( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        return common_inputs
    @property
    def default_onnx_opset( self : Union[str, Any] ) ->int:
        """simple docstring"""
        return 13
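# Illustrative note: with the gated variants, feed_forward_proj='gated-gelu'
# resolves to dense_act_fn='gelu_new' with is_gated_act=True, per the
# backwards-compatibility branch in __init__ above.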
| 0 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( property ):
    def __get__( self : Dict , obj : str , objtype : List[Any]=None ) -> str:
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute' )
        attr = '__cached_' + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool ( val : str ):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'''invalid truth value {val!r}''' )
def a_ ( lowerCamelCase : str ):
if is_torch_fx_proxy(lowerCamelCase ):
return True
if is_torch_available():
import torch
if isinstance(lowerCamelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCamelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCamelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase : Optional[Any] ):
return isinstance(lowerCamelCase , np.ndarray )
def a_ ( lowerCamelCase : Tuple ):
return _is_numpy(lowerCamelCase )
def a_ ( lowerCamelCase : Dict ):
import torch
return isinstance(lowerCamelCase , torch.Tensor )
def a_ ( lowerCamelCase : Tuple ):
return False if not is_torch_available() else _is_torch(lowerCamelCase )
def a_ ( lowerCamelCase : List[str] ):
import torch
return isinstance(lowerCamelCase , torch.device )
def a_ ( lowerCamelCase : str ):
return False if not is_torch_available() else _is_torch_device(lowerCamelCase )
def a_ ( lowerCamelCase : Any ):
import torch
if isinstance(lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , lowerCamelCase ):
lowerCAmelCase = getattr(lowerCamelCase , lowerCamelCase )
else:
return False
return isinstance(lowerCamelCase , torch.dtype )
def a_ ( lowerCamelCase : Union[str, Any] ):
return False if not is_torch_available() else _is_torch_dtype(lowerCamelCase )
def a_ ( lowerCamelCase : str ):
import tensorflow as tf
return isinstance(lowerCamelCase , tf.Tensor )
def a_ ( lowerCamelCase : Optional[Any] ):
return False if not is_tf_available() else _is_tensorflow(lowerCamelCase )
def a_ ( lowerCamelCase : Optional[int] ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCamelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowerCamelCase )
return type(lowerCamelCase ) == tf.Tensor
def a_ ( lowerCamelCase : str ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCamelCase )
def a_ ( lowerCamelCase : Union[str, Any] ):
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCamelCase , jnp.ndarray )
def a_ ( lowerCamelCase : List[str] ):
return False if not is_flax_available() else _is_jax(lowerCamelCase )
def a_ ( lowerCamelCase : Tuple ):
if isinstance(lowerCamelCase , (dict, UserDict) ):
return {k: to_py_obj(lowerCamelCase ) for k, v in obj.items()}
elif isinstance(lowerCamelCase , (list, tuple) ):
return [to_py_obj(lowerCamelCase ) for o in obj]
elif is_tf_tensor(lowerCamelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCamelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCamelCase ):
return np.asarray(lowerCamelCase ).tolist()
elif isinstance(lowerCamelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def a_ ( lowerCamelCase : Dict ):
if isinstance(lowerCamelCase , (dict, UserDict) ):
return {k: to_numpy(lowerCamelCase ) for k, v in obj.items()}
elif isinstance(lowerCamelCase , (list, tuple) ):
return np.array(lowerCamelCase )
elif is_tf_tensor(lowerCamelCase ):
return obj.numpy()
elif is_torch_tensor(lowerCamelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCamelCase ):
return np.asarray(lowerCamelCase )
else:
return obj
class UpperCAmelCase_ ( OrderedDict ):
def __UpperCAmelCase ( self : Dict ) -> List[Any]:
lowerCAmelCase = fields(self )
# Safety and consistency checks
if not len(__UpperCAmelCase ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
lowerCAmelCase = getattr(self , class_fields[0].name )
lowerCAmelCase = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(__UpperCAmelCase ):
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
lowerCAmelCase = first_field.items()
lowerCAmelCase = True
else:
try:
lowerCAmelCase = iter(__UpperCAmelCase )
lowerCAmelCase = True
except TypeError:
lowerCAmelCase = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(__UpperCAmelCase ):
if (
not isinstance(__UpperCAmelCase , (list, tuple) )
or not len(__UpperCAmelCase ) == 2
or not isinstance(element[0] , __UpperCAmelCase )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
lowerCAmelCase = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
lowerCAmelCase = element[1]
elif first_field is not None:
lowerCAmelCase = first_field
else:
for field in class_fields:
lowerCAmelCase = getattr(self , field.name )
if v is not None:
lowerCAmelCase = v
    def __delitem__( self : List[str] , *args : str , **kwargs : Union[str, Any] ) -> Optional[int]:
        raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
    def setdefault( self : str , *args : Union[str, Any] , **kwargs : int ) -> Optional[Any]:
        raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
    def pop( self : List[str] , *args : Any , **kwargs : Optional[int] ) -> str:
        raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
    def update( self : Optional[int] , *args : Dict , **kwargs : List[str] ) -> List[Any]:
        raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
    def __getitem__( self : Optional[Any] , k : int ) -> Union[str, Any]:
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self : List[str] , name : Any , value : List[str] ) -> Any:
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self : List[str] , key : List[Any] , value : Optional[Any] ) -> Optional[Any]:
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class ExplicitEnum ( str , Enum ):
    @classmethod
    def _missing_( cls : Optional[Any] , value : Dict ) -> List[str]:
        raise ValueError(
            F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class PaddingStrategy ( ExplicitEnum ):
    LONGEST = '''longest'''
    MAX_LENGTH = '''max_length'''
    DO_NOT_PAD = '''do_not_pad'''
class TensorType ( ExplicitEnum ):
    PYTORCH = '''pt'''
    TENSORFLOW = '''tf'''
    NUMPY = '''np'''
    JAX = '''jax'''
class UpperCAmelCase_ :
    def __init__( self : Optional[int] , context_managers : List[ContextManager] ) -> Optional[Any]:
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self : str ) -> Dict:
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self : int , *args : Optional[int] , **kwargs : Optional[int] ) -> Optional[int]:
        self.stack.__exit__(*args , **kwargs )
def can_return_loss(model_class):
    """
    Check if a given model can return a loss (i.e. exposes a `return_loss` parameter defaulting to `True`).
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the label argument(s) used by a given model, by inspecting its signature.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict, joining keys with `delimiter`."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
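# A quick illustration of `flatten_dict` (output verified by hand):
#
#     flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     # -> {"a": 1, "b.c": 2, "b.d.e": 3}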
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that works on torch/TensorFlow/Jax tensors
    as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")
def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape`.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze`.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims`.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size`.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
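# Minimal NumPy-only sanity checks for the framework-agnostic helpers above
# (the torch/TF/JAX branches behave analogously):
#
#     x = np.ones((1, 2, 3))
#     transpose(x).shape        # (3, 2, 1)
#     reshape(x, (2, 3)).shape  # (2, 3)
#     squeeze(x).shape          # (2, 3)
#     expand_dims(x, 0).shape   # (1, 1, 2, 3)
#     tensor_size(x)            # 6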
def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the
    relevant classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        # for-else: only reached when no base class matched any framework
        raise TypeError(f"Could not infer framework from class {model_class}.")
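# Illustrative check of `infer_framework` (assumes torch and transformers are installed):
#
#     from transformers import BertModel
#     infer_framework(BertModel)  # -> "pt"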
| 4 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a [`LlamaModel`].
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
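# Illustration of the rope-scaling validation above (a sketch, assuming the class is importable):
#
#     LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes validation
#     LlamaConfig(rope_scaling={"type": "unknown", "factor": 2.0})  # raises ValueError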
| 0 | 0 |
"""Fast tokenization class for ELECTRA, backed by HuggingFace's `tokenizers` library."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" ELECTRA tokenizer, backed by HuggingFace's `tokenizers` library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
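# Illustrative usage (downloads a real checkpoint, so it needs network access):
#
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])
#     # -> [0, 0, 0, 0, 1, 1, 1]  (cls + 2 ids + sep = 4 zeros; 2 ids + sep = 3 ones)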
| 41 |
from __future__ import annotations

__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"

import re
from html.parser import HTMLParser
from urllib import parse

import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def get_domain_name(url: str) -> str:
    """Get the main domain name (example.com)."""
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


def get_sub_domain_name(url: str) -> str:
    """Get the sub-domain name (sub.example.com)."""
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    """Crawl the given URL and return all e-mail addresses found on linked pages."""
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> tuple:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
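    # illustrative examples, values verified by hand from P = V * I:
    print(electric_power(voltage=0, current=2, power=5))  # result(name='voltage', value=2.5)
    print(electric_power(voltage=2, current=2, power=0))  # result(name='power', value=4.0)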
| 84 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
@slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
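# These tests are meant to be run with pytest from a transformers checkout, e.g.
# (path is illustrative): RUN_SLOW=1 python -m pytest examples/pytorch/test_accelerate_examples.py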
| 0 | 0 |
"""Doomsday algorithm (https://en.wikipedia.org/wiki/Doomsday_rule)."""

DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name out of a given date.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    """
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # non-leap years: not divisible by 4, or century years not divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
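    # example usage (24 Oct 2020 was a Saturday):
    print(get_week_day(2020, 10, 24))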
    doctest.testmod()
| 163 |
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes returning all primes below `n`."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """
    Sums all semidivisible numbers not exceeding `limit`
    (this appears to be Project Euler problem 234).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
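    # sanity check for the sieve helper, verified by hand:
    print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]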
| 0 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
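# Illustrative use of the lazy module (assumes this file is
# transformers/models/mobilevit/__init__.py and the torch extras are installed):
#
#     from transformers.models.mobilevit import MobileViTConfig, MobileViTModel
#     model = MobileViTModel(MobileViTConfig())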
| 205 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price of a good including a flat rate of tax."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(100, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 0 | 0 |