| column | dtype | min | max |
| --- | --- | --- | --- |
| `code` | string (length) | 82 | 53.2k |
| `code_codestyle` | int64 | 0 | 721 |
| `style_context` | string (length) | 91 | 41.9k |
| `style_context_codestyle` | int64 | 0 | 699 |
| `label` | int64 | 0 | 1 |
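The rows below follow this schema in order: `code`, `code_codestyle`, `style_context`, `style_context_codestyle`, `label`. From the rows themselves, `label` appears to be 1 exactly when the two codestyle ids match, i.e. when the snippet and its context share the same (obfuscated) naming style. A minimal sketch of iterating over such rows with the `datasets` library; the Hub repo id is a hypothetical placeholder, not this dataset's actual id:

```python
from datasets import load_dataset

# "user/code-style-pairs" is a placeholder id, not the real one
ds = load_dataset("user/code-style-pairs", split="train")
for row in ds.select(range(3)):
    same_style = row["code_codestyle"] == row["style_context_codestyle"]
    # observed in the rows below: label == 1 iff the codestyle ids match
    print(row["label"], same_style)
```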
code:

```python
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over any contiguous subarray of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the running maximum and minimum
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
```
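A quick sanity check of the reconstruction above (the name `max_product_subarray` is mine; the dataset stores the sample with obfuscated identifiers):

```python
assert max_product_subarray([2, 3, -2, 4]) == 6  # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0    # 0 beats every negative product
assert max_product_subarray([-4, -3]) == 12      # two negatives make a positive
```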
code_codestyle: 681

style_context:
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase ="""▁""" _lowerCamelCase =get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase): _UpperCAmelCase : str = BertGenerationTokenizer _UpperCAmelCase : Tuple = False _UpperCAmelCase : List[Any] = True def UpperCamelCase__ ( self ): super().setUp() lowerCamelCase : int = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ ( self ): lowerCamelCase : List[str] = """<s>""" lowerCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<pad>""" ) self.assertEqual(len(__magic_name__ ) , 1_0_0_2 ) def UpperCamelCase__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def UpperCamelCase__ ( self ): lowerCamelCase : Tuple = BertGenerationTokenizer(__magic_name__ , keep_accents=__magic_name__ ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__magic_name__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowerCamelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__magic_name__ ) self.assertListEqual( __magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase__ ( self ): return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : List[Any] = """Hello World!""" lowerCamelCase : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @slow def UpperCamelCase__ ( self ): lowerCamelCase : str = ( """This is a very long text with a lot of weird characters, such 
as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) lowerCamelCase : str = [ 8_7_1, 4_1_9, 3_5_8, 9_4_6, 9_9_1, 2_5_2_1, 4_5_2, 3_5_8, 1_3_5_7, 3_8_7, 7_7_5_1, 3_5_3_6, 1_1_2, 9_8_5, 4_5_6, 1_2_6, 8_6_5, 9_3_8, 5_4_0_0, 5_7_3_4, 4_5_8, 1_3_6_8, 4_6_7, 7_8_6, 2_4_6_2, 5_2_4_6, 1_1_5_9, 6_3_3, 8_6_5, 4_5_1_9, 4_5_7, 5_8_2, 8_5_2, 2_5_5_7, 4_2_7, 9_1_6, 5_0_8, 4_0_5, 3_4_3_2_4, 4_9_7, 3_9_1, 4_0_8, 1_1_3_4_2, 1_2_4_4, 3_8_5, 1_0_0, 9_3_8, 9_8_5, 4_5_6, 5_7_4, 3_6_2, 1_2_5_9_7, 3_2_0_0, 3_1_2_9, 1_1_7_2, ] self.assertListEqual(__magic_name__ , self.big_tokenizer.encode(__magic_name__ ) ) @require_torch @slow def UpperCamelCase__ ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowerCamelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowerCamelCase : Dict = """ """.join(__magic_name__ ) lowerCamelCase : Any = self.big_tokenizer.encode_plus(__magic_name__ , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=__magic_name__ ) lowerCamelCase : Tuple = BertGenerationConfig() lowerCamelCase : Optional[int] = BertGenerationEncoder(__magic_name__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__magic_name__ ) model(**__magic_name__ ) @slow def UpperCamelCase__ ( self ): # fmt: off lowerCamelCase : Any = {"""input_ids""": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
style_context_codestyle: 681
label: 1

---

code:
```python
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Project Euler 8: largest product of 13 adjacent digits in the series."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
```
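The `reduce` lambda multiplies two digits but returns the product as a string, so the accumulator can be fed back in as text alongside the remaining digit characters; the outer `int(...)` converts the final product once per 13-digit window.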
code_codestyle: 358

style_context:
'''simple docstring''' import argparse import math import traceback import dateutil.parser as date_parser import requests def snake_case_ (_a : List[str] ): UpperCAmelCase = {} UpperCAmelCase = job['''started_at'''] UpperCAmelCase = job['''completed_at'''] UpperCAmelCase = date_parser.parse(_a ) UpperCAmelCase = date_parser.parse(_a ) UpperCAmelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) UpperCAmelCase = start UpperCAmelCase = end UpperCAmelCase = duration_in_min return job_info def snake_case_ (_a : str , _a : List[str]=None ): UpperCAmelCase = None if token is not None: UpperCAmelCase = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F"Bearer {token}"} UpperCAmelCase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" UpperCAmelCase = requests.get(_a , headers=_a ).json() UpperCAmelCase = {} try: job_time.update({job['''name''']: extract_time_from_single_job(_a ) for job in result['''jobs''']} ) UpperCAmelCase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 ) for i in range(_a ): UpperCAmelCase = requests.get(url + F"&page={i + 2}" , headers=_a ).json() job_time.update({job['''name''']: extract_time_from_single_job(_a ) for job in result['''jobs''']} ) return job_time except Exception: print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" ) return {} if __name__ == "__main__": A =argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') A =parser.parse_args() A =get_job_time(args.workflow_run_id) A =dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(f"""{k}: {v["duration"]}""")
style_context_codestyle: 358
label: 1

---

code:
```python
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 126765060022822940149670320537376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
```
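Spot checks for the three reconstructed variants:

```python
assert sum_of_digits(262144) == 19
assert sum_of_digits_recursion(-9045) == 18  # sign is discarded via abs()
assert sum_of_digits_compact(0) == 0
```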
code_codestyle: 107

style_context:
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _UpperCAmelCase : str = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Tuple = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : str = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
style_context_codestyle: 107
label: 1

---

code:
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : Optional[Any] = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowercase ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCAmelCase_ : Optional[int] = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) A : List[Any] = num_channels A : Optional[Any] = patch_sizes A : Optional[int] = patch_stride A : Union[str, Any] = patch_padding A : Union[str, Any] = embed_dim A : Optional[Any] = num_heads A : Any = depth A : Optional[int] = mlp_ratio A : Optional[int] = attention_drop_rate A : Optional[int] = drop_rate A : Tuple = drop_path_rate A : Tuple = qkv_bias A : Dict = cls_token A : List[Any] = qkv_projection_method A : Union[str, Any] = kernel_qkv A : Union[str, Any] = padding_kv A : Union[str, Any] = stride_kv A : str = padding_q A : Union[str, Any] = stride_q A : Dict = initializer_range A : List[str] = layer_norm_eps
code_codestyle: 423

style_context:
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : List[str] = logging.get_logger(__name__) lowercase : Optional[Any] = { "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json", # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowercase ( _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCAmelCase_ : Optional[int] = '''cvt''' def __init__( self , __UpperCAmelCase=3 , __UpperCAmelCase=[7, 3, 3] , __UpperCAmelCase=[4, 2, 2] , __UpperCAmelCase=[2, 1, 1] , __UpperCAmelCase=[64, 1_92, 3_84] , __UpperCAmelCase=[1, 3, 6] , __UpperCAmelCase=[1, 2, 10] , __UpperCAmelCase=[4.0, 4.0, 4.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.0] , __UpperCAmelCase=[0.0, 0.0, 0.1] , __UpperCAmelCase=[True, True, True] , __UpperCAmelCase=[False, False, True] , __UpperCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , __UpperCAmelCase=[3, 3, 3] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[2, 2, 2] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=[1, 1, 1] , __UpperCAmelCase=0.0_2 , __UpperCAmelCase=1E-12 , **__UpperCAmelCase , ) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) A : List[Any] = num_channels A : Optional[Any] = patch_sizes A : Optional[int] = patch_stride A : Union[str, Any] = patch_padding A : Union[str, Any] = embed_dim A : Optional[Any] = num_heads A : Any = depth A : Optional[int] = mlp_ratio A : Optional[int] = attention_drop_rate A : Optional[int] = drop_rate A : Tuple = drop_path_rate A : Tuple = qkv_bias A : Dict = cls_token A : List[Any] = qkv_projection_method A : Union[str, Any] = kernel_qkv A : Union[str, Any] = padding_kv A : Union[str, Any] = stride_kv A : str = padding_q A : Union[str, Any] = stride_q A : Dict = initializer_range A : List[str] = layer_norm_eps
style_context_codestyle: 423
label: 1

---

code:
"""simple docstring""" from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup __A : Dict = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l=''' def lowercase ( __snake_case : str = "mumbai" ): lowercase_ : List[str] = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' ) # This attribute finds out all the specifics listed in a job for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ): lowercase_ : Union[str, Any] = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip() lowercase_ : Optional[Any] = job.find('''span''' , {'''class''': '''company'''} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs('''Bangalore'''), 1): print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
code_codestyle: 231

style_context:
def _lowerCAmelCase ( A__: list[list] ): '''simple docstring''' UpperCAmelCase = current_set.copy() for row_index, row in enumerate(A__ ): UpperCAmelCase = row[0] for column_index, column in enumerate(A__ ): if magnitude == 0: UpperCAmelCase = column continue UpperCAmelCase = column / magnitude # Subtract to cancel term UpperCAmelCase = current_set[0] UpperCAmelCase = [first_row] UpperCAmelCase = current_set[1::] for row in current_set: UpperCAmelCase = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(A__ ) continue for column_index in range(len(A__ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(A__ ) # Create next recursion iteration set if len(final_set[0] ) != 3: UpperCAmelCase = final_set[0] UpperCAmelCase = [] UpperCAmelCase = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) UpperCAmelCase = simplify(A__ ) for i in range(len(A__ ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , A__ ) UpperCAmelCase = resultant return final_set def _lowerCAmelCase ( A__: list[list] ): '''simple docstring''' if len(A__ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) UpperCAmelCase = len(A__ ) + 1 if any(len(A__ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(A__ , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(A__ ) == 1: return [equations[0][-1] / equations[0][0]] UpperCAmelCase = equations.copy() if any(0 in row for row in data_set ): UpperCAmelCase = data_set.copy() UpperCAmelCase = [] for row_index, row in enumerate(A__ ): if 0 not in row: UpperCAmelCase = data_set.pop(A__ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , A__ ) UpperCAmelCase = data_set.copy() UpperCAmelCase = simplify(A__ ) UpperCAmelCase = simplified[::-1] UpperCAmelCase = [] for row in simplified: UpperCAmelCase = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue UpperCAmelCase = row.copy()[: len(A__ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(A__ ) == 0: solutions.append(0 ) continue UpperCAmelCase = temp_row[1::] UpperCAmelCase = temp_row[::-1] for column_index, column in enumerate(A__ ): current_solution -= column * solutions[column_index] solutions.append(A__ ) UpperCAmelCase = [] for item in solutions: final.append(float(round(A__ , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() __magic_name__ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
style_context_codestyle: 254
label: 0

---

code:
```python
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: repeatedly remove vertices of in-degree zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
```
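Running the module prints `[0, 1, 2, 3, 4, 5]` for the sample graph. A cyclic input never yields a vertex of in-degree zero, so `cnt` falls short of the vertex count and the function reports the cycle instead:

```python
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"
```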
code_codestyle: 417

style_context:
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __UpperCamelCase : str = logging.get_logger(__name__) __UpperCamelCase : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp __UpperCamelCase : Optional[Any] = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } __UpperCamelCase : Tuple = { '''RUCAIBox/mvp''': 1024, } class lowerCamelCase__ ( snake_case_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["""input_ids""", """attention_mask"""] __magic_name__ = MvpTokenizer def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__="replace" , UpperCAmelCase__="<s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="</s>" , UpperCAmelCase__="<s>" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="<mask>" , UpperCAmelCase__=False , UpperCAmelCase__=True , **UpperCAmelCase__ , ) -> List[Any]: super().__init__( UpperCAmelCase__ , UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , errors=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ , **UpperCAmelCase__ , ) _A : Any = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase__ ) != add_prefix_space: _A : Dict = getattr(UpperCAmelCase__ , pre_tok_state.pop('''type''' ) ) _A : List[Any] = add_prefix_space _A : Tuple = pre_tok_class(**UpperCAmelCase__ ) _A : List[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _A : Any = '''post_processor''' _A : Union[str, Any] = getattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ ) if tokenizer_component_instance: _A : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _A : int = tuple(state['''sep'''] ) if "cls" in state: _A : Union[str, Any] = tuple(state['''cls'''] ) _A : int = False if state.get('''add_prefix_space''' , UpperCAmelCase__ ) != add_prefix_space: _A : Optional[int] = add_prefix_space _A : Union[str, Any] = True if state.get('''trim_offsets''' , UpperCAmelCase__ ) != trim_offsets: _A : List[str] = trim_offsets _A : int = True if changes_to_apply: _A : Optional[int] = getattr(UpperCAmelCase__ , state.pop('''type''' ) ) _A : str = component_class(**UpperCAmelCase__ ) 
setattr(self.backend_tokenizer , UpperCAmelCase__ , UpperCAmelCase__ ) @property def _lowerCamelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Tuple: _A : Any = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else value _A : Any = value def _lowerCamelCase ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> BatchEncoding: _A : Optional[int] = kwargs.get('''is_split_into_words''' , UpperCAmelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ ) def _lowerCamelCase ( self , *UpperCAmelCase__ , **UpperCAmelCase__ ) -> BatchEncoding: _A : int = kwargs.get('''is_split_into_words''' , UpperCAmelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*UpperCAmelCase__ , **UpperCAmelCase__ ) def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> Tuple[str]: _A : List[Any] = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ ) return tuple(UpperCAmelCase__ ) def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__=None ) -> Tuple: _A : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ) -> List[int]: _A : str = [self.sep_token_id] _A : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
style_context_codestyle: 417
label: 1

---

code:
```python
def molarity_to_normality(nfactor: int, moles: float, volume: float):
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float):
    # ideal gas law, PV = nRT, with R = 0.0821 L*atm/(mol*K)
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float):
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float):
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
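These are rearrangements of the ideal gas law PV = nRT with R = 0.0821 L·atm/(mol·K), each rounded to the nearest integer; the function names are my reconstruction of the obfuscated sample. Two worked values:

```python
assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90            # atm
assert pressure_and_volume_to_temperature(pressure=0.82, moles=1, volume=2) == 20  # K
```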
code_codestyle: 268

style_context:
```python
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the last index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch at this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
```
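For the driver code above the search prints `[0, 3]`, the two alignments of "AB" in "ABAABA". Note that only the bad-character table is consulted, and the shift computed in the `else` branch re-assigns the loop variable, which a Python `for` loop overwrites on the next iteration; every alignment is therefore still tried (hence the `lgtm` suppression comment).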
style_context_codestyle: 268
label: 1

---

code:
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__=1_0 ): UpperCamelCase__ : List[str] = [] for _ in range(lowerCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def SCREAMING_SNAKE_CASE_ ( UpperCamelCase__ , UpperCamelCase__=1_0 ): UpperCamelCase__ : str = [] for step in range(lowerCamelCase_ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ : Optional[Any] = os.path.join(lowerCamelCase_ , '''schedule.bin''' ) torch.save(scheduler.state_dict() , lowerCamelCase_ ) UpperCamelCase__ : int = torch.load(lowerCamelCase_ ) scheduler.load_state_dict(lowerCamelCase_ ) return lrs @require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ : Optional[int] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCamelCase ) UpperCamelCase__ : Optional[int] = torch.tensor([0.4, 0.2, -0.5] ) UpperCamelCase__ : List[str] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCamelCase__ : Dict = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): UpperCamelCase__ : Optional[int] = criterion(__lowerCamelCase , __lowerCamelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: """simple docstring""" UpperCamelCase__ : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowerCamelCase ) UpperCamelCase__ : Optional[Any] = torch.tensor([0.4, 0.2, -0.5] ) UpperCamelCase__ : List[str] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping UpperCamelCase__ : List[str] = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowerCamelCase , weight_decay=0.0 , relative_step=__lowerCamelCase , scale_parameter=__lowerCamelCase , warmup_init=__lowerCamelCase , ) for _ in range(1_0_0_0 ): UpperCamelCase__ : Tuple = criterion(__lowerCamelCase , __lowerCamelCase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ = nn.Linear(50 , 50 ) if is_torch_available() else None SCREAMING_SNAKE_CASE_ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None SCREAMING_SNAKE_CASE_ = 10 def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Any: """simple docstring""" self.assertEqual(len(__lowerCamelCase ) , len(__lowerCamelCase ) ) for a, b in zip(__lowerCamelCase , __lowerCamelCase ): self.assertAlmostEqual(__lowerCamelCase , __lowerCamelCase , delta=__lowerCamelCase , msg=__lowerCamelCase ) def __SCREAMING_SNAKE_CASE ( self ) -> int: """simple docstring""" UpperCamelCase__ : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) UpperCamelCase__ : Optional[Any] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): UpperCamelCase__ : List[Any] = data UpperCamelCase__ : str = scheduler_func(self.optimizer , **__lowerCamelCase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) UpperCamelCase__ : Union[str, Any] = unwrap_schedule(__lowerCamelCase , self.num_steps ) self.assertListAlmostEqual( __lowerCamelCase , __lowerCamelCase , tol=1e-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) UpperCamelCase__ : Union[str, Any] = scheduler_func(self.optimizer , **__lowerCamelCase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(__lowerCamelCase ) # wrap to test picklability of the schedule UpperCamelCase__ : Optional[int] = unwrap_and_save_reload_schedule(__lowerCamelCase , self.num_steps ) self.assertListEqual(__lowerCamelCase , __lowerCamelCase , msg=F'''failed for {scheduler_func} in save and reload''' ) class _lowerCamelCase : """simple docstring""" def __init__( self , __SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" UpperCamelCase__ : Any = fn def __call__( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return self.fn(*__lowerCamelCase , **__lowerCamelCase ) @classmethod def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" UpperCamelCase__ : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
code_codestyle: 702

style_context:
```python
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
```
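For f(x) = x³ + x² on [−5, 5] the exact unsigned area is ∫|x³ + x²| dx = 938/3 ≈ 312.67 (the integrand is negative on (−5, −1)), and the printed estimates approach that value as the step count grows.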
style_context_codestyle: 462
label: 0

---

code:
```python
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # a smaller value before a larger one is subtractive (e.g. IV = 4)
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
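Round-trip checks for the reconstructed conversions (the two function names are mine):

```python
assert roman_to_int("MMXXIV") == 2024
assert int_to_roman(2024) == "MMXXIV"
assert roman_to_int(int_to_roman(3749)) == 3749
```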
code_codestyle: 15

style_context:
import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def UpperCamelCase ( __magic_name__ : Dict , __magic_name__ : List[str]=7 ) -> Dict: """simple docstring""" lowercase__ = None if token is not None: lowercase__ = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} # The id of a workflow (not of a workflow run) lowercase__ = """636036""" lowercase__ = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' lowercase__ = requests.get(__magic_name__ , headers=__magic_name__ ).json() return result["workflow_runs"] def UpperCamelCase ( __magic_name__ : str ) -> Dict: """simple docstring""" lowercase__ = get_daily_ci_runs(__magic_name__ ) lowercase__ = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowercase__ = workflow_run["""id"""] break return workflow_run_id def UpperCamelCase ( __magic_name__ : str , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> str: """simple docstring""" lowercase__ = get_last_daily_ci_runs(__magic_name__ ) if workflow_run_id is not None: lowercase__ = get_artifacts_links(worflow_run_id=__magic_name__ , token=__magic_name__ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowercase__ = artifacts_links[artifact_name] download_artifact( artifact_name=__magic_name__ , artifact_url=__magic_name__ , output_dir=__magic_name__ , token=__magic_name__ ) def UpperCamelCase ( __magic_name__ : List[str] , __magic_name__ : str , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" get_last_daily_ci_artifacts(__magic_name__ , __magic_name__ , __magic_name__ ) lowercase__ = {} for artifact_name in artifact_names: lowercase__ = os.path.join(__magic_name__ , f'''{artifact_name}.zip''' ) if os.path.isfile(__magic_name__ ): lowercase__ = {} with zipfile.ZipFile(__magic_name__ ) as z: for filename in z.namelist(): if not os.path.isdir(__magic_name__ ): # read the file with z.open(__magic_name__ ) as f: lowercase__ = f.read().decode("""UTF-8""" ) return results
style_context_codestyle: 15
label: 1

---

code:
import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __snake_case : def __init__( self , _A , _A=99 , _A=13 , _A=16 , _A=7 , _A=True , _A=True , _A=True , _A=False , _A=True , _A=2 , _A=32 , _A=4 , _A=4 , _A=30 , _A=0 , _A=1 , _A=2 , _A=None , ): SCREAMING_SNAKE_CASE_ = parent SCREAMING_SNAKE_CASE_ = batch_size SCREAMING_SNAKE_CASE_ = decoder_seq_length # For common tests SCREAMING_SNAKE_CASE_ = self.decoder_seq_length SCREAMING_SNAKE_CASE_ = is_training SCREAMING_SNAKE_CASE_ = use_attention_mask SCREAMING_SNAKE_CASE_ = use_labels SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = eos_token_id SCREAMING_SNAKE_CASE_ = bos_token_id SCREAMING_SNAKE_CASE_ = pad_token_id SCREAMING_SNAKE_CASE_ = decoder_start_token_id SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = max_position_embeddings SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = decoder_seq_length SCREAMING_SNAKE_CASE_ = 2 SCREAMING_SNAKE_CASE_ = 1 def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = None if self.use_attention_mask: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ = None if self.use_labels: SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def lowerCAmelCase__ ( self , _A , _A , _A , _A , ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = TrOCRDecoder(config=_A).to(_A).eval() SCREAMING_SNAKE_CASE_ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) SCREAMING_SNAKE_CASE_ = model(_A) SCREAMING_SNAKE_CASE_ = model(_A , use_cache=_A) self.parent.assertTrue(len(_A) == len(_A)) self.parent.assertTrue(len(_A) == len(_A) + 1) SCREAMING_SNAKE_CASE_ = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE_ = ids_tensor((2, 1) , config.vocab_size - 1) + 1 # append to next input_ids and SCREAMING_SNAKE_CASE_ = torch.cat([input_ids, next_tokens] , dim=-1) SCREAMING_SNAKE_CASE_ = model(_A)['last_hidden_state'] SCREAMING_SNAKE_CASE_ = model(_A , past_key_values=_A)['last_hidden_state'] # select random slice SCREAMING_SNAKE_CASE_ = ids_tensor((1,) , 
output_from_past.shape[-1]).item() SCREAMING_SNAKE_CASE_ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() SCREAMING_SNAKE_CASE_ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(_A , _A , atol=1E-3) def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs SCREAMING_SNAKE_CASE_ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class __snake_case ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Union[str, Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : str = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Any = True __lowerCAmelCase : str = False def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = TrOCRStandaloneDecoderModelTester(self , is_training=_A) SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A) def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): pass def lowerCAmelCase__ ( self): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self): SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*_A) def lowerCAmelCase__ ( self): return @unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :) def lowerCAmelCase__ ( self): pass
code_codestyle: 705

style_context:
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : Any = { "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"], "tokenization_mvp": ["MvpTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Optional[int] = ["MvpTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : str = [ "MVP_PRETRAINED_MODEL_ARCHIVE_LIST", "MvpForCausalLM", "MvpForConditionalGeneration", "MvpForQuestionAnswering", "MvpForSequenceClassification", "MvpModel", "MvpPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 620
label: 0

---

code:
"""simple docstring""" def _snake_case ( snake_case__ : List[str] , snake_case__ : Optional[int] ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) A = (boundary[1] - boundary[0]) / steps A = boundary[0] A = boundary[1] A = make_points(snake_case__ , snake_case__ , snake_case__ ) A = 0.0 y += (h / 2.0) * f(snake_case__ ) for i in x_i: # print(i) y += h * f(snake_case__ ) y += (h / 2.0) * f(snake_case__ ) return y def _snake_case ( snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Any ): A = a + h while x < (b - h): yield x A = x + h def _snake_case ( snake_case__ : Optional[Any] ): # enter your function here A = (x - 0) * (x - 0) return y def _snake_case ( ): A = 0.0 # Lower bound of integration A = 1.0 # Upper bound of integration A = 10.0 # define number of steps or resolution A = [a, b] # define boundary of integration A = method_a(snake_case__ , snake_case__ ) print(F'y = {y}' ) if __name__ == "__main__": main()
code_codestyle: 91

style_context:
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
style_context_codestyle: 91
label: 1

---

code:
```python
import torch

from diffusers import DiffusionPipeline


class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample
        # cancels to zero, then adds ones: the pipeline always returns a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
```
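A hypothetical invocation of the reconstruction above (the class name is inferred from the diffusers one-step community pipeline; any unconditional UNet/scheduler pair should do, trained or not):

```python
from diffusers import DDPMScheduler, UNet2DModel

unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)  # untrained toy UNet
scheduler = DDPMScheduler()
pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
print(pipe().shape)  # torch.Size([1, 3, 32, 32]) -- a tensor of ones by construction
```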
code_codestyle: 335

style_context:
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class __a ( __UpperCamelCase , unittest.TestCase ): __lowercase : Optional[int] = AlbertTokenizer __lowercase : str = AlbertTokenizerFast __lowercase : List[Any] = True __lowercase : int = True __lowercase : Any = True def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__: Optional[int] = AlbertTokenizer(lowerCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Dict: '''simple docstring''' lowercase__: Dict = 'this is a test' lowercase__: List[str] = 'this is a test' return input_text, output_text def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase__: Dict = '<pad>' lowercase__: Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' lowercase__: Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '▁eloquent' ) self.assertEqual(len(lowerCAmelCase__ ) , 30_000 ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' if not self.test_rust_tokenizer: return lowercase__: Optional[int] = self.get_tokenizer() lowercase__: List[Any] = self.get_rust_tokenizer() lowercase__: str = 'I was born in 92000, and this is falsé.' lowercase__: Any = tokenizer.tokenize(lowerCAmelCase__ ) lowercase__: Optional[Any] = rust_tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: Union[str, Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) lowercase__: Tuple = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase__: int = self.get_rust_tokenizer() lowercase__: str = tokenizer.encode(lowerCAmelCase__ ) lowercase__: str = rust_tokenizer.encode(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: Union[str, Any] = AlbertTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ ) lowercase__: Optional[int] = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCAmelCase__ , ['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [48, 25, 21, 1_289] ) lowercase__: Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowerCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) lowercase__: Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) lowercase__: str = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ ) self.assertListEqual( lowerCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' lowercase__: Optional[int] = AlbertTokenizer(lowerCAmelCase__ ) lowercase__: List[Any] = tokenizer.encode('sequence builders' ) lowercase__: Tuple = tokenizer.encode('multi-sequence build' ) lowercase__: str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ ) lowercase__: Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]: '''simple docstring''' # fmt: off lowercase__: List[Any] = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
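# A plain-Python sketch of the special-token layout that the sequence-builders
# test above asserts (build_inputs_with_special_tokens): [CLS] A [SEP] for one
# sequence and [CLS] A [SEP] B [SEP] for a pair. The cls/sep ids below are
# placeholders for illustration, not the real ALBERT vocabulary ids.
CLS, SEP = 101, 102


def build_inputs_with_special_tokens(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]


assert build_inputs_with_special_tokens([7, 8]) == [CLS, 7, 8, SEP]
assert build_inputs_with_special_tokens([7], [9]) == [CLS, 7, SEP, 9, SEP]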
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class __snake_case ( _lowercase): snake_case__ : int = "wav2vec2" def __init__( self : List[str] , __lowerCAmelCase : Tuple=3_2 , __lowerCAmelCase : Tuple=7_6_8 , __lowerCAmelCase : Union[str, Any]=1_2 , __lowerCAmelCase : Optional[Any]=1_2 , __lowerCAmelCase : Optional[int]=3_0_7_2 , __lowerCAmelCase : List[Any]="gelu" , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : str=0.0 , __lowerCAmelCase : str=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : Optional[Any]=1E-5 , __lowerCAmelCase : List[str]="group" , __lowerCAmelCase : int="gelu" , __lowerCAmelCase : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __lowerCAmelCase : Tuple=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : Any=(1_0, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : Any=1_2_8 , __lowerCAmelCase : Optional[Any]=1_6 , __lowerCAmelCase : int=False , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Optional[int]=0.05 , __lowerCAmelCase : Union[str, Any]=1_0 , __lowerCAmelCase : Optional[Any]=2 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : List[str]=1_0 , __lowerCAmelCase : Tuple=0 , __lowerCAmelCase : Tuple=3_2_0 , __lowerCAmelCase : Union[str, Any]=2 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : Dict=1_0_0 , __lowerCAmelCase : Union[str, Any]=2_5_6 , __lowerCAmelCase : str=2_5_6 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : int="sum" , __lowerCAmelCase : Any=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Dict=2_5_6 , __lowerCAmelCase : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , __lowerCAmelCase : Optional[Any]=(5, 3, 3, 1, 1) , __lowerCAmelCase : Optional[int]=(1, 2, 3, 1, 1) , __lowerCAmelCase : List[str]=5_1_2 , __lowerCAmelCase : Any=0 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : Tuple=2 , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : Tuple=3 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None , **__lowerCAmelCase : int , ): """simple docstring""" super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _lowerCamelCase : Any = hidden_size _lowerCamelCase : Union[str, Any] = feat_extract_norm _lowerCamelCase : Dict = feat_extract_activation _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = list(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = list(__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = conv_bias _lowerCamelCase : Any = num_conv_pos_embeddings _lowerCamelCase : Any = num_conv_pos_embedding_groups _lowerCamelCase : Dict = len(self.conv_dim ) _lowerCamelCase : Tuple = num_hidden_layers _lowerCamelCase : Union[str, Any] = intermediate_size _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : Union[str, Any] = num_attention_heads _lowerCamelCase : List[Any] = hidden_dropout _lowerCamelCase : int = attention_dropout _lowerCamelCase : List[Any] = 
activation_dropout _lowerCamelCase : Tuple = feat_proj_dropout _lowerCamelCase : List[Any] = final_dropout _lowerCamelCase : List[Any] = layerdrop _lowerCamelCase : List[str] = layer_norm_eps _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : str = vocab_size _lowerCamelCase : int = do_stable_layer_norm _lowerCamelCase : Optional[Any] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _lowerCamelCase : Union[str, Any] = apply_spec_augment _lowerCamelCase : Any = mask_time_prob _lowerCamelCase : List[Any] = mask_time_length _lowerCamelCase : Tuple = mask_time_min_masks _lowerCamelCase : str = mask_feature_prob _lowerCamelCase : Union[str, Any] = mask_feature_length _lowerCamelCase : int = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _lowerCamelCase : List[Any] = num_codevectors_per_group _lowerCamelCase : List[str] = num_codevector_groups _lowerCamelCase : Dict = contrastive_logits_temperature _lowerCamelCase : str = feat_quantizer_dropout _lowerCamelCase : Optional[int] = num_negatives _lowerCamelCase : Optional[int] = codevector_dim _lowerCamelCase : Union[str, Any] = proj_codevector_dim _lowerCamelCase : str = diversity_loss_weight # ctc loss _lowerCamelCase : Tuple = ctc_loss_reduction _lowerCamelCase : List[str] = ctc_zero_infinity # adapter _lowerCamelCase : Union[str, Any] = add_adapter _lowerCamelCase : List[Any] = adapter_kernel_size _lowerCamelCase : Dict = adapter_stride _lowerCamelCase : Dict = num_adapter_layers _lowerCamelCase : Optional[int] = output_hidden_size or hidden_size _lowerCamelCase : Optional[Any] = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. _lowerCamelCase : List[str] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _lowerCamelCase : Union[str, Any] = list(__lowerCAmelCase ) _lowerCamelCase : int = list(__lowerCAmelCase ) _lowerCamelCase : Optional[int] = list(__lowerCAmelCase ) _lowerCamelCase : Tuple = xvector_output_dim @property def SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
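# Standalone check of the stride-product property at the end of the config
# above (inputs_to_logits_ratio in the real Wav2Vec2Config): the overall
# downsampling factor of the feature extractor is the product of the conv
# strides (5, 2, 2, 2, 2, 2, 2).
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # one logit frame per 320 input samples (~20 ms at 16 kHz)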
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor


logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PerceiverImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
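# Generic sketch of the deprecation-shim pattern above: the old name becomes a
# subclass of its replacement and emits a FutureWarning on construction.
# NewProcessor / OldFeatureExtractor are made-up names for illustration.
import warnings


class NewProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)


old = OldFeatureExtractor(size=256)  # warns, then behaves exactly like NewProcessor
assert old.size == 256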
'''simple docstring''' import argparse import os import re import packaging.version __snake_case : int = 'examples/' __snake_case : Dict = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } __snake_case : List[str] = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } __snake_case : int = 'README.md' def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : List[Any], _UpperCamelCase : List[str] ) -> int: with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f: A_ = f.read() A_ ,A_ = REPLACE_PATTERNS[pattern] A_ = replace.replace('''VERSION''', _UpperCamelCase ) A_ = re_pattern.sub(_UpperCamelCase, _UpperCamelCase ) with open(_UpperCamelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f: f.write(_UpperCamelCase ) def _UpperCAmelCase ( _UpperCamelCase : Any ) -> int: for folder, directories, fnames in os.walk(_UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(_UpperCamelCase, _UpperCamelCase ), _UpperCamelCase, pattern='''examples''' ) def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : str=False ) -> List[str]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) if not patch: update_version_in_examples(_UpperCamelCase ) def _UpperCAmelCase ( ) -> Dict: A_ = '''🤗 Transformers currently provides the following architectures''' A_ = '''1. Want to contribute a new model?''' with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f: A_ = f.readlines() # Find the start of the list. A_ = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 A_ = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): A_ = lines[index].replace( '''https://huggingface.co/docs/diffusers/main/model_doc''', '''https://huggingface.co/docs/diffusers/model_doc''', ) index += 1 with open(_UpperCamelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f: f.writelines(_UpperCamelCase ) def _UpperCAmelCase ( ) -> List[Any]: with open(REPLACE_FILES['''init'''], '''r''' ) as f: A_ = f.read() A_ = REPLACE_PATTERNS['''init'''][0].search(_UpperCamelCase ).groups()[0] return packaging.version.parse(_UpperCamelCase ) def _UpperCAmelCase ( _UpperCamelCase : str=False ) -> Union[str, Any]: A_ = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: A_ = default_version.base_version elif patch: A_ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: A_ = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. A_ = input(F'''Which version are you releasing? 
[{default_version}]''' ) if len(_UpperCamelCase ) == 0: A_ = default_version print(F'''Updating version to {version}.''' ) global_version_update(_UpperCamelCase, patch=_UpperCamelCase ) def _UpperCAmelCase ( ) -> int: A_ = get_version() A_ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' A_ = current_version.base_version # Check with the user we got that right. A_ = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(_UpperCamelCase ) == 0: A_ = dev_version print(F'''Updating version to {version}.''' ) global_version_update(_UpperCamelCase ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": __snake_case : int = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') __snake_case : Optional[Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
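# Standalone sketch of the regex replacement update_version_in_file performs;
# the pattern below mirrors the "init" entry of REPLACE_PATTERNS above, and
# the version strings are made up for the example.
import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = '__version__ = "0.19.0.dev0"\n'
print(re_pattern.sub('__version__ = "0.19.0"', code))  # -> __version__ = "0.19.0"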
'''simple docstring''' from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : List[Any] = { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json''' ), } class lowerCamelCase_ (lowerCAmelCase_ ): '''simple docstring''' __UpperCamelCase: Optional[Any] = "xlm-prophetnet" __UpperCamelCase: List[str] = ["past_key_values"] __UpperCamelCase: int = { "num_attention_heads": "num_encoder_attention_heads", } def __init__( self : str , A : Optional[float] = 0.1 , A : Optional[Union[str, Callable]] = "gelu" , A : Optional[int] = 30522 , A : Optional[int] = 1024 , A : Optional[int] = 4096 , A : Optional[int] = 12 , A : Optional[int] = 16 , A : Optional[int] = 4096 , A : Optional[int] = 12 , A : Optional[int] = 16 , A : Optional[float] = 0.1 , A : Optional[float] = 0.1 , A : Optional[int] = 512 , A : Optional[float] = 0.02 , A : Optional[bool] = True , A : Optional[bool] = True , A : Optional[int] = 0 , A : Optional[int] = 2 , A : Optional[int] = 32 , A : Optional[int] = 128 , A : Optional[bool] = False , A : Optional[float] = 0.0 , A : Optional[bool] = True , A : Optional[int] = 0 , A : Optional[int] = 1 , A : Optional[int] = 2 , **A : int , ): _UpperCAmelCase : Any = vocab_size _UpperCAmelCase : str = hidden_size _UpperCAmelCase : Union[str, Any] = encoder_ffn_dim _UpperCAmelCase : List[Any] = num_encoder_layers _UpperCAmelCase : str = num_encoder_attention_heads _UpperCAmelCase : int = decoder_ffn_dim _UpperCAmelCase : Optional[int] = num_decoder_layers _UpperCAmelCase : List[Any] = num_decoder_attention_heads _UpperCAmelCase : Optional[int] = max_position_embeddings _UpperCAmelCase : List[str] = init_std # Normal(0, this parameter) _UpperCAmelCase : Optional[int] = activation_function # parameters for xlmprophetnet _UpperCAmelCase : Dict = ngram _UpperCAmelCase : List[Any] = num_buckets _UpperCAmelCase : Tuple = relative_max_distance _UpperCAmelCase : Optional[int] = disable_ngram_loss _UpperCAmelCase : Optional[Any] = eps # 3 Types of Dropout _UpperCAmelCase : int = attention_dropout _UpperCAmelCase : str = activation_dropout _UpperCAmelCase : Union[str, Any] = dropout _UpperCAmelCase : Optional[int] = use_cache super().__init__( pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , add_cross_attention=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) @property def _A ( self : Dict ): return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def _A ( self : Optional[int] , A : Tuple ): raise NotImplementedError( "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" " `num_decoder_layers`." )
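# Sketch of the read-only derived-property pattern used by num_hidden_layers
# above: the value is computed from encoder/decoder depth and direct
# assignment is rejected. Cfg is an illustrative stand-in class.
class Cfg:
    def __init__(self, num_encoder_layers=12, num_decoder_layers=12):
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers

    @property
    def num_hidden_layers(self):
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("Set num_encoder_layers and num_decoder_layers instead.")


assert Cfg().num_hidden_layers == 24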
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class SCREAMING_SNAKE_CASE_ : '''simple docstring''' lowercase : Tuple = BlenderbotConfig lowercase : Optional[int] = {} lowercase : Union[str, Any] = "gelu" def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict=13 , SCREAMING_SNAKE_CASE__ : int=7 , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Optional[int]=99 , SCREAMING_SNAKE_CASE__ : Any=32 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : str=4 , SCREAMING_SNAKE_CASE__ : List[Any]=37 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.1 , SCREAMING_SNAKE_CASE__ : List[Any]=20 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : int=1 , SCREAMING_SNAKE_CASE__ : str=0 , ) -> List[str]: A : List[Any] =parent A : str =batch_size A : int =seq_length A : Optional[Any] =is_training A : List[str] =use_labels A : List[Any] =vocab_size A : Tuple =hidden_size A : List[Any] =num_hidden_layers A : int =num_attention_heads A : Optional[int] =intermediate_size A : List[Any] =hidden_dropout_prob A : Union[str, Any] =attention_probs_dropout_prob A : Optional[Any] =max_position_embeddings A : Optional[Any] =eos_token_id A : List[str] =pad_token_id A : Union[str, Any] =bos_token_id def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Dict: A : Dict =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A : Optional[int] =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A : List[Any] =tf.concat([input_ids, eos_tensor] , axis=1 ) A : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Any =self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A : int =prepare_blenderbot_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ) -> str: A : List[Any] =TFBlenderbotModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder() A : int =inputs_dict['input_ids'] A : Any =input_ids[:1, :] A : Optional[Any] =inputs_dict['attention_mask'][:1, :] A : List[str] =inputs_dict['head_mask'] A : List[Any] =1 # first forward pass A : Any =model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , 
use_cache=SCREAMING_SNAKE_CASE__ ) A , A : Any =outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A : List[Any] =ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A : Union[str, Any] =tf.concat([input_ids, next_tokens] , axis=-1 ) A : List[Any] =tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A : List[str] =model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )[0] A : Optional[Any] =model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A : Tuple =int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A : Tuple =output_from_no_past[:, -3:, random_slice_idx] A : str =output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , rtol=1e-3 ) def A__ ( lowercase: List[str], lowercase: List[str], lowercase: Optional[Any], lowercase: Optional[Any]=None, lowercase: Tuple=None, lowercase: List[str]=None, lowercase: Union[str, Any]=None, lowercase: Dict=None, ) -> Dict: if attention_mask is None: A : Any =tf.cast(tf.math.not_equal(lowercase, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: A : Optional[Any] =tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: A : List[Any] =tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A : Tuple =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A : List[str] =tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): '''simple docstring''' lowercase : Tuple = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () lowercase : Any = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () lowercase : Any = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) lowercase : Tuple = True lowercase : Dict = False lowercase : int = False def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any: A : Optional[Any] =TFBlenderbotModelTester(self ) A : Dict =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE_ ( self : int ) -> Tuple: self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Union[str, Any]: A : Any =self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE__ ) @require_tokenizers @require_tf class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): '''simple docstring''' lowercase : List[Any] = ["My friends are cool but they 
eat too many carbs."] lowercase : str = "facebook/blenderbot-400M-distill" @cached_property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]: return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Union[str, Any]: A : Tuple =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]: A : Optional[Any] =self.tokenizer(self.src_text , return_tensors='tf' ) A : Tuple =self.model.generate( model_inputs.input_ids , ) A : Union[str, Any] =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=SCREAMING_SNAKE_CASE__ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
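# Numpy sketch of the assertion pattern in check_decoder_model_past_large_inputs
# above: pick a random feature index and compare the last tokens' outputs from
# the cached and uncached code paths. The arrays here are synthetic stand-ins
# for real model outputs.
import numpy as np

rng = np.random.default_rng(0)
output_from_no_past = rng.normal(size=(2, 8, 16))         # (batch, seq, hidden)
output_from_past = output_from_no_past[:, -3:, :].copy()  # stand-in for the cached run
random_slice_idx = int(rng.integers(output_from_no_past.shape[-1]))
np.testing.assert_allclose(
    output_from_no_past[:, -3:, random_slice_idx],
    output_from_past[:, :, random_slice_idx],
    rtol=1e-3,
)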
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __UpperCamelCase : Tuple = logging.get_logger(__name__) class _UpperCamelCase ( A ): '''simple docstring''' a_ : Dict = "vision-encoder-decoder" a_ : Tuple = True def __init__( self : List[Any] , **_lowerCamelCase : List[str] ): '''simple docstring''' super().__init__(**_lowerCamelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F"""A configuraton of type {self.model_type} cannot be instantiated because """ F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) __lowerCamelCase : Union[str, Any] = kwargs.pop("""encoder""" ) __lowerCamelCase : Union[str, Any] = encoder_config.pop("""model_type""" ) __lowerCamelCase : Optional[int] = kwargs.pop("""decoder""" ) __lowerCamelCase : str = decoder_config.pop("""model_type""" ) __lowerCamelCase : str = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase ) __lowerCamelCase : Tuple = AutoConfig.for_model(_lowerCamelCase , **_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = True @classmethod def _snake_case ( cls : int , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : PretrainedConfig , **_lowerCamelCase : str ): '''simple docstring''' logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) __lowerCamelCase : Optional[int] = True __lowerCamelCase : Tuple = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_lowerCamelCase ) def _snake_case ( self : Optional[Any] ): '''simple docstring''' __lowerCamelCase : Optional[Any] = copy.deepcopy(self.__dict__ ) __lowerCamelCase : List[Any] = self.encoder.to_dict() __lowerCamelCase : int = self.decoder.to_dict() __lowerCamelCase : List[str] = self.__class__.model_type return output class _UpperCamelCase ( A ): '''simple docstring''' a_ : Optional[Any] = version.parse("1.11" ) @property def _snake_case ( self : Dict ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _snake_case ( self : Dict ): '''simple docstring''' return 1E-4 @property def _snake_case ( self : Optional[int] ): '''simple docstring''' return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class _UpperCamelCase ( A ): '''simple docstring''' @property def _snake_case ( self : Dict ): '''simple docstring''' __lowerCamelCase : Dict = OrderedDict() __lowerCamelCase : Any = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __lowerCamelCase : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __lowerCamelCase : Optional[int] = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def _snake_case ( self : Optional[int] , _lowerCamelCase : "PreTrainedTokenizerBase" , _lowerCamelCase : int = -1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional["TensorType"] = None , ): '''simple docstring''' import torch __lowerCamelCase : List[str] = OrderedDict() __lowerCamelCase : List[str] = super().generate_dummy_inputs( _lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , 
framework=_lowerCamelCase ) __lowerCamelCase : List[Any] = dummy_input["""input_ids"""].shape __lowerCamelCase : Optional[Any] = (batch, encoder_sequence, self._config.encoder_hidden_size) __lowerCamelCase : Any = dummy_input.pop("""input_ids""" ) __lowerCamelCase : Dict = dummy_input.pop("""attention_mask""" ) __lowerCamelCase : int = torch.zeros(_lowerCamelCase ) return common_inputs class _UpperCamelCase ( A ): '''simple docstring''' @property def _snake_case ( self : Any ): '''simple docstring''' pass def _snake_case ( self : str , _lowerCamelCase : PretrainedConfig ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(_lowerCamelCase ) def _snake_case ( self : str , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : PretrainedConfig , _lowerCamelCase : str = "default" ): '''simple docstring''' __lowerCamelCase : List[Any] = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(_lowerCamelCase , _lowerCamelCase )
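# Minimal sketch of the nested-config serialization done by to_dict above:
# deep-copy the instance dict, then expand each sub-config with its own
# to_dict. SubCfg / ComposedCfg are illustrative stand-ins, not the
# transformers classes.
import copy


class SubCfg:
    def __init__(self, hidden=32):
        self.hidden = hidden

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class ComposedCfg:
    model_type = "composed"

    def __init__(self, encoder, decoder):
        self.encoder, self.decoder = encoder, decoder

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.model_type
        return output


print(ComposedCfg(SubCfg(), SubCfg(64)).to_dict())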
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def _UpperCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Any=1_024 ): """simple docstring""" __lowerCamelCase , __lowerCamelCase : str = [], [] __lowerCamelCase : Any = list(zip(UpperCAmelCase , UpperCAmelCase ) ) __lowerCamelCase , __lowerCamelCase : List[str] = sorted_examples[0] def is_too_big(UpperCAmelCase : Optional[Any] ): return tok(UpperCAmelCase , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens for src, tgt in tqdm(sorted_examples[1:] ): __lowerCamelCase : Union[str, Any] = new_src + """ """ + src __lowerCamelCase : str = new_tgt + """ """ + tgt if is_too_big(UpperCAmelCase ) or is_too_big(UpperCAmelCase ): # cant fit, finalize example finished_src.append(UpperCAmelCase ) finished_tgt.append(UpperCAmelCase ) __lowerCamelCase , __lowerCamelCase : str = src, tgt else: # can fit, keep adding __lowerCamelCase , __lowerCamelCase : int = cand_src, cand_tgt # cleanup if new_src: assert new_tgt finished_src.append(UpperCAmelCase ) finished_tgt.append(UpperCAmelCase ) return finished_src, finished_tgt def _UpperCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Path , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ): """simple docstring""" __lowerCamelCase : List[Any] = Path(UpperCAmelCase ) save_path.mkdir(exist_ok=UpperCAmelCase ) for split in ["train"]: __lowerCamelCase , __lowerCamelCase : List[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" __lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()] __lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()] __lowerCamelCase , __lowerCamelCase : int = pack_examples(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) print(f"""packed {split} split from {len(UpperCAmelCase )} examples -> {len(UpperCAmelCase )}.""" ) Path(save_path / f"""{split}.source""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) ) Path(save_path / f"""{split}.target""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) ) for split in ["val", "test"]: __lowerCamelCase , __lowerCamelCase : Optional[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target""" shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.source""" ) shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.target""" ) def _UpperCAmelCase ( ): """simple docstring""" __lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--tok_name""" , type=UpperCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" ) parser.add_argument("""--max_seq_len""" , type=UpperCAmelCase , default=128 ) parser.add_argument("""--data_dir""" , type=UpperCAmelCase ) parser.add_argument("""--save_path""" , type=UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = parser.parse_args() __lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tok_name ) return pack_data_dir(UpperCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path ) if __name__ == "__main__": packer_cli()
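# Standalone sketch of the greedy source/target packing implemented above:
# merge consecutive example pairs while both sides stay under a length budget.
# A whitespace word count stands in for the tokenizer-based length check.
def pack_pairs(srcs, tgts, max_words=8):
    packed_src, packed_tgt = [], []
    cur_src, cur_tgt = srcs[0], tgts[0]
    for src, tgt in zip(srcs[1:], tgts[1:]):
        cand_src, cand_tgt = cur_src + " " + src, cur_tgt + " " + tgt
        if len(cand_src.split()) > max_words or len(cand_tgt.split()) > max_words:
            # Candidate would exceed the budget: finalize the current pack.
            packed_src.append(cur_src)
            packed_tgt.append(cur_tgt)
            cur_src, cur_tgt = src, tgt
        else:
            # Still fits: keep accumulating.
            cur_src, cur_tgt = cand_src, cand_tgt
    packed_src.append(cur_src)
    packed_tgt.append(cur_tgt)
    return packed_src, packed_tgt


# -> (['a b c d', 'e f g h i'], ['x y', 'z'])
print(pack_pairs(["a b", "c d", "e f g h i"], ["x", "y", "z"], max_words=6))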
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('Enter the power of 2: ').strip())
    print('2 ^ ', power, ' = ', 2**power)
    result = solution(power)
    print('Sum of the digits is: ', result)
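# Equivalent one-liner for the digit sum above; sanity check with power=15:
# 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert sum(int(digit) for digit in str(2**15)) == 26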
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)
    if cols2 != 1:
        msg = f'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1')

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant')
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
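# Small usage check for jacobi_iteration_method above on a strictly
# diagonally dominant system (exact solution x = y = 1); with 25 iterations
# the returned values are close to [1.0, 1.0].
import numpy as np

coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[5.0], [4.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0, 0], iterations=25))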
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase ( unittest.TestCase ): def __init__(self : Any , _A : Tuple , _A : List[str]=1_3 , _A : Optional[Any]=3 , _A : Dict=2_2_4 , _A : Optional[int]=3_0 , _A : str=4_0_0 , _A : List[Any]=True , _A : int=None , _A : Any=True , _A : Tuple=[0.5, 0.5, 0.5] , _A : Optional[Any]=[0.5, 0.5, 0.5] , ) -> List[str]: snake_case = size if size is not None else {"height": 1_8, "width": 1_8} snake_case = parent snake_case = batch_size snake_case = num_channels snake_case = image_size snake_case = min_resolution snake_case = max_resolution snake_case = do_resize snake_case = size snake_case = do_normalize snake_case = image_mean snake_case = image_std def UpperCAmelCase(self : str ) -> str: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCamelCase ( A_ , unittest.TestCase ): UpperCAmelCase__ : int = ViTImageProcessor if is_vision_available() else None def UpperCAmelCase(self : str ) -> Union[str, Any]: snake_case = EfficientFormerImageProcessorTester(self ) @property def UpperCAmelCase(self : Dict ) -> Tuple: return self.image_proc_tester.prepare_image_processor_dict() def UpperCAmelCase(self : str ) -> Union[str, Any]: snake_case = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , "image_mean" ) ) self.assertTrue(hasattr(_A , "image_std" ) ) self.assertTrue(hasattr(_A , "do_normalize" ) ) self.assertTrue(hasattr(_A , "do_resize" ) ) self.assertTrue(hasattr(_A , "size" ) ) def UpperCAmelCase(self : str ) -> Any: pass def UpperCAmelCase(self : Dict ) -> int: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input snake_case = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched snake_case = image_processor(_A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def UpperCAmelCase(self : List[str] ) -> Union[str, Any]: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input snake_case = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched snake_case = 
image_processor(_A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def UpperCAmelCase(self : str ) -> Optional[Any]: # Initialize image_processor snake_case = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case = prepare_image_inputs(self.image_proc_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input snake_case = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched snake_case = image_processor(_A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
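# Numpy sketch of the per-channel normalization these processors apply,
# mirroring image_mean = image_std = [0.5, 0.5, 0.5] from the tester above.
import numpy as np

image = np.random.rand(3, 18, 18).astype("float32")  # channels-first, values in [0, 1)
mean = std = np.array([0.5, 0.5, 0.5], dtype="float32").reshape(3, 1, 1)
normalized = (image - mean) / std
assert normalized.shape == (3, 18, 18)
assert normalized.min() >= -1.0 and normalized.max() < 1.0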
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        vocab_tokens = [
            "<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>",
            "我", "是", "C", "P", "M", "A", "n", "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self) -> None:
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
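# Plain-dict sketch of the vocab round trip the fixture above sets up:
# token -> id is just line order in the written vocab file.
vocab_tokens = ["<d>", "</d>", "<s>", "</s>", "</_>", "<unk>", "<pad>", "</n>",
                "我", "是", "C", "P", "M", "A", "n", "t"]
token_to_id = {tok: i for i, tok in enumerate(vocab_tokens)}
assert [token_to_id[t] for t in ["C", "P", "M", "A", "n", "t"]] == [10, 11, 12, 13, 14, 15]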
import math


def is_prime(number: int) -> bool:
    """simple docstring"""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """simple docstring"""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
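# Quick checks for the helpers above (after the parameter fix): 29 is prime,
# and searching upward from the composite 28 lands on it.
assert is_prime(29)
assert next_prime(28) == 29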
import unittest import numpy as np from transformers import RobertaConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.roberta.modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, ) class _A ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=4 , ) -> Optional[Any]: __UpperCAmelCase =parent __UpperCAmelCase =batch_size __UpperCAmelCase =seq_length __UpperCAmelCase =is_training __UpperCAmelCase =use_attention_mask __UpperCAmelCase =use_token_type_ids __UpperCAmelCase =use_labels __UpperCAmelCase =vocab_size __UpperCAmelCase =hidden_size __UpperCAmelCase =num_hidden_layers __UpperCAmelCase =num_attention_heads __UpperCAmelCase =intermediate_size __UpperCAmelCase =hidden_act __UpperCAmelCase =hidden_dropout_prob __UpperCAmelCase =attention_probs_dropout_prob __UpperCAmelCase =max_position_embeddings __UpperCAmelCase =type_vocab_size __UpperCAmelCase =type_sequence_label_size __UpperCAmelCase =initializer_range __UpperCAmelCase =num_choices def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase =None if self.use_attention_mask: __UpperCAmelCase =random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase =None if self.use_token_type_ids: __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase =RobertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self : Tuple ) -> Optional[int]: __UpperCAmelCase =self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase ={"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def _a ( self : List[str] ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =config_and_inputs __UpperCAmelCase =True __UpperCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = ( ( FlaxRobertaModel, FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self : List[Any] ) -> List[str]: __UpperCAmelCase =FlaxRobertaModelTester(self ) @slow def _a ( self : Optional[Any] ) -> List[Any]: for model_class_name in self.all_model_classes: __UpperCAmelCase =model_class_name.from_pretrained("""roberta-base""" , from_pt=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
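# Numpy stand-ins for the ids_tensor / random_attention_mask helpers the
# tester above relies on; the semantics assumed here are uniform random ids
# and a 0/1 mask whose last position is forced to 1 so no row is fully masked.
import numpy as np

rng = np.random.default_rng(0)


def ids_tensor(shape, vocab_size):
    return rng.integers(0, vocab_size, size=shape)


def random_attention_mask(shape):
    mask = rng.integers(0, 2, size=shape)
    mask[:, -1] = 1  # guarantee at least one attended token per row
    return mask


print(ids_tensor((2, 7), 99).shape, random_attention_mask((2, 7)).sum(axis=-1))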
'''simple docstring''' import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowercase : '''simple docstring''' def __init__( self ,lowerCamelCase_ ,lowerCamelCase_=13 ,lowerCamelCase_=[30, 30] ,lowerCamelCase_=2 ,lowerCamelCase_=3 ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=32 ,lowerCamelCase_=5 ,lowerCamelCase_=4 ,lowerCamelCase_=37 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=10 ,lowerCamelCase_=0.02 ,lowerCamelCase_=3 ,lowerCamelCase_=None ,lowerCamelCase_=8 ,lowerCamelCase_=10 ,) -> str: '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : Optional[int] = batch_size UpperCAmelCase__ : Tuple = image_size UpperCAmelCase__ : int = patch_size UpperCAmelCase__ : Optional[int] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : Tuple = use_labels UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Any = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : Optional[int] = intermediate_size UpperCAmelCase__ : Union[str, Any] = hidden_act UpperCAmelCase__ : Any = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : int = initializer_range UpperCAmelCase__ : Optional[int] = num_labels UpperCAmelCase__ : Optional[int] = scope UpperCAmelCase__ : Union[str, Any] = n_targets UpperCAmelCase__ : int = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens UpperCAmelCase__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size) UpperCAmelCase__ : List[Any] = num_patches + 1 + self.num_detection_tokens def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' UpperCAmelCase__ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) UpperCAmelCase__ : List[str] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) UpperCAmelCase__ : List[Any] = [] for i in range(self.batch_size ): UpperCAmelCase__ : Any = {} UpperCAmelCase__ : str = torch.randint( high=self.num_labels ,size=(self.n_targets,) ,device=lowerCamelCase_ ) UpperCAmelCase__ : int = torch.rand(self.n_targets ,4 ,device=lowerCamelCase_ ) labels.append(lowerCamelCase_ ) UpperCAmelCase__ : Any = self.get_config() return config, pixel_values, labels def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' return YolosConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size 
,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase_ ,initializer_range=self.initializer_range ,num_detection_tokens=self.num_detection_tokens ,num_labels=self.num_labels ,) def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Tuple: '''simple docstring''' UpperCAmelCase__ : int = YolosModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase__ : Optional[int] = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.expected_seq_len, self.hidden_size) ) def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Any: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = YolosForObjectDetection(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCAmelCase__ : Dict = model(pixel_values=lowerCamelCase_ ) UpperCAmelCase__ : str = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) ) UpperCAmelCase__ : Any = model(pixel_values=lowerCamelCase_ ,labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape ,() ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape ,(self.batch_size, self.num_detection_tokens, 4) ) def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = config_and_inputs UpperCAmelCase__ : str = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowercase ( lowerCAmelCase ,lowerCAmelCase ,unittest.TestCase ): '''simple docstring''' UpperCAmelCase_ : Any = (YolosModel, YolosForObjectDetection) if is_torch_available() else () UpperCAmelCase_ : str = ( {'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {} ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : str = False UpperCAmelCase_ : str = False UpperCAmelCase_ : Optional[Any] = False def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_=False ) -> List[Any]: '''simple docstring''' UpperCAmelCase__ : int = super()._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ,return_labels=lowerCamelCase_ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": UpperCAmelCase__ : int = [] for i in range(self.model_tester.batch_size ): UpperCAmelCase__ : List[str] = {} UpperCAmelCase__ : Optional[int] = torch.ones( size=(self.model_tester.n_targets,) ,device=lowerCamelCase_ ,dtype=torch.long ) UpperCAmelCase__ : Optional[int] = torch.ones( self.model_tester.n_targets ,4 ,device=lowerCamelCase_ ,dtype=torch.float ) labels.append(lowerCamelCase_ ) UpperCAmelCase__ : List[str] = labels return inputs_dict def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase__ : int = YolosModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self ,config_class=lowerCamelCase_ ,has_text_modality=lowerCamelCase_ ,hidden_size=37 ) def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' pass def 
lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) UpperCAmelCase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ ,nn.Linear ) ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(lowerCamelCase_ ) UpperCAmelCase__ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : int = [*signature.parameters.keys()] UpperCAmelCase__ : int = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,lowerCamelCase_ ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[Any] = True # in YOLOS, the seq_len is different UpperCAmelCase__ : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : List[Any] = False UpperCAmelCase__ : Optional[Any] = True UpperCAmelCase__ : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase__ : Any = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase__ : List[Any] = True UpperCAmelCase__ : Any = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase__ : List[Any] = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,) UpperCAmelCase__ : str = len(lowerCamelCase_ ) # Check attention is always last and order is fine UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : Tuple = True UpperCAmelCase__ : str = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase__ : int = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase__ : Dict = 1 self.assertEqual(out_len + added_hidden_states ,len(lowerCamelCase_ ) ) UpperCAmelCase__ : int = outputs.attentions self.assertEqual(len(lowerCamelCase_ ) ,self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,) def lowerCAmelCase__ ( self ) -> List[str]: '''simple docstring''' def check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ): UpperCAmelCase__ : List[str] = 
model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(**self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ ) ) UpperCAmelCase__ : Dict = outputs.hidden_states UpperCAmelCase__ : Tuple = getattr( self.model_tester ,'''expected_num_hidden_layers''' ,self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ ) # YOLOS has a different seq_length UpperCAmelCase__ : Optional[int] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,) UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = True check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : Dict = True check_hidden_states_output(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*lowerCamelCase_ ) @slow def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = YolosModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def __UpperCamelCase( ): '''simple docstring''' UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' return AutoImageProcessor.from_pretrained('''hustvl/yolos-small''' ) if is_vision_available() else None @slow def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = YolosForObjectDetection.from_pretrained('''hustvl/yolos-small''' ).to(lowerCamelCase_ ) UpperCAmelCase__ : Optional[Any] = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Any = image_processor(images=lowerCamelCase_ ,return_tensors='''pt''' ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Any = model(inputs.pixel_values ) # verify outputs UpperCAmelCase__ : Dict = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape ,lowerCamelCase_ ) UpperCAmelCase__ : Optional[Any] = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ,device=lowerCamelCase_ ,) UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ,device=lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] ,lowerCamelCase_ ,atol=1e-4 ) ) # verify postprocessing UpperCAmelCase__ : Union[str, Any] = image_processor.post_process_object_detection( lowerCamelCase_ ,threshold=0.3 ,target_sizes=[image.size[::-1]] )[0] UpperCAmelCase__ : Optional[Any] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(lowerCamelCase_ ) UpperCAmelCase__ : Optional[int] = [75, 75, 17, 63, 17] UpperCAmelCase__ : Any = torch.tensor([335.0609, 79.3848, 375.4216, 
187.2495] ).to(lowerCamelCase_ ) self.assertEqual(len(results['''scores'''] ) ,5 ) self.assertTrue(torch.allclose(results['''scores'''] ,lowerCamelCase_ ,atol=1e-4 ) ) self.assertSequenceEqual(results['''labels'''].tolist() ,lowerCamelCase_ ) self.assertTrue(torch.allclose(results['''boxes'''][0, :] ,lowerCamelCase_ ) )
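# A minimal standalone sketch of the flow exercised by the slow integration
# test above; the checkpoint, threshold, and fixture path mirror that test,
# and everything else is the public `transformers` object-detection API
# (the checkpoint is downloaded on first run).
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Map raw logits/box predictions to thresholded detections in image coordinates.
detections = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(detections["scores"], detections["labels"], detections["boxes"]):
    print(f"label={label.item()} score={score.item():.3f} box={box.tolist()}")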
'''simple docstring''' import argparse import collections import json import os import re import string import sys import numpy as np UpperCamelCase__ : Union[str, Any] = re.compile(r'\b(a|an|the)\b', re.UNICODE) UpperCamelCase__ : List[Any] = None def __UpperCamelCase( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' ) parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' ) parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' ) parser.add_argument( '''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' ) parser.add_argument( '''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' ) parser.add_argument( '''--na-prob-thresh''' , '''-t''' , type=_A , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , ) parser.add_argument( '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=_A , help='''Save precision-recall curves to directory.''' ) parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def __UpperCamelCase( _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: UpperCAmelCase__ : Union[str, Any] = bool(qa['''answers''']['''text'''] ) return qid_to_has_ans def __UpperCamelCase( _A : Dict ): '''simple docstring''' def remove_articles(_A : Union[str, Any] ): return ARTICLES_REGEX.sub(''' ''' , _A ) def white_space_fix(_A : Optional[int] ): return " ".join(text.split() ) def remove_punc(_A : Optional[Any] ): UpperCAmelCase__ : Any = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_A : Union[str, Any] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) ) def __UpperCamelCase( _A : Optional[Any] ): '''simple docstring''' if not s: return [] return normalize_answer(_A ).split() def __UpperCamelCase( _A : Tuple , _A : str ): '''simple docstring''' return int(normalize_answer(_A ) == normalize_answer(_A ) ) def __UpperCamelCase( _A : Optional[Any] , _A : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = get_tokens(_A ) UpperCAmelCase__ : Tuple = get_tokens(_A ) UpperCAmelCase__ : Any = collections.Counter(_A ) & collections.Counter(_A ) UpperCAmelCase__ : List[Any] = sum(common.values() ) if len(_A ) == 0 or len(_A ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 UpperCAmelCase__ : Optional[Any] = 1.0 * num_same / len(_A ) UpperCAmelCase__ : Tuple = 1.0 * num_same / len(_A ) UpperCAmelCase__ : Optional[int] = (2 * precision * recall) / (precision + recall) return fa def __UpperCamelCase( _A : List[str] , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = {} UpperCAmelCase__ : Any = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: UpperCAmelCase__ : str = qa['''id'''] UpperCAmelCase__ : List[Any] = [t for t in qa['''answers''']['''text'''] if normalize_answer(_A )] if not gold_answers: # For unanswerable questions, only correct answer is empty string UpperCAmelCase__ : Tuple = [''''''] if qid not in preds: 
print(F'''Missing prediction for {qid}''' ) continue UpperCAmelCase__ : Union[str, Any] = preds[qid] # Take max over all gold answers UpperCAmelCase__ : List[str] = max(compute_exact(_A , _A ) for a in gold_answers ) UpperCAmelCase__ : List[str] = max(compute_fa(_A , _A ) for a in gold_answers ) return exact_scores, fa_scores def __UpperCamelCase( _A : Any , _A : Optional[Any] , _A : List[str] , _A : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = {} for qid, s in scores.items(): UpperCAmelCase__ : Dict = na_probs[qid] > na_prob_thresh if pred_na: UpperCAmelCase__ : Any = float(not qid_to_has_ans[qid] ) else: UpperCAmelCase__ : List[str] = s return new_scores def __UpperCamelCase( _A : str , _A : Optional[Any] , _A : Any=None ): '''simple docstring''' if not qid_list: UpperCAmelCase__ : List[Any] = len(_A ) return collections.OrderedDict( [ ('''exact''', 1_0_0.0 * sum(exact_scores.values() ) / total), ('''f1''', 1_0_0.0 * sum(fa_scores.values() ) / total), ('''total''', total), ] ) else: UpperCAmelCase__ : List[str] = len(_A ) return collections.OrderedDict( [ ('''exact''', 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total), ('''f1''', 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total), ('''total''', total), ] ) def __UpperCamelCase( _A : List[str] , _A : List[Any] , _A : Tuple ): '''simple docstring''' for k in new_eval: UpperCAmelCase__ : List[str] = new_eval[k] def __UpperCamelCase( _A : Tuple , _A : Any , _A : Optional[int] , _A : int ): '''simple docstring''' plt.step(_A , _A , color='''b''' , alpha=0.2 , where='''post''' ) plt.fill_between(_A , _A , step='''post''' , alpha=0.2 , color='''b''' ) plt.xlabel('''Recall''' ) plt.ylabel('''Precision''' ) plt.xlim([0.0, 1.0_5] ) plt.ylim([0.0, 1.0_5] ) plt.title(_A ) plt.savefig(_A ) plt.clf() def __UpperCamelCase( _A : Optional[int] , _A : Tuple , _A : Any , _A : Any , _A : Tuple=None , _A : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = sorted(_A , key=lambda _A : na_probs[k] ) UpperCAmelCase__ : Tuple = 0.0 UpperCAmelCase__ : Any = 1.0 UpperCAmelCase__ : Any = 0.0 UpperCAmelCase__ : Union[str, Any] = [1.0] UpperCAmelCase__ : int = [0.0] UpperCAmelCase__ : Optional[Any] = 0.0 for i, qid in enumerate(_A ): if qid_to_has_ans[qid]: true_pos += scores[qid] UpperCAmelCase__ : Optional[Any] = true_pos / float(i + 1 ) UpperCAmelCase__ : int = true_pos / float(_A ) if i == len(_A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(_A ) recalls.append(_A ) if out_image: plot_pr_curve(_A , _A , _A , _A ) return {"ap": 1_0_0.0 * avg_prec} def __UpperCamelCase( _A : Any , _A : Optional[Any] , _A : List[Any] , _A : Any , _A : Dict , _A : Any ): '''simple docstring''' if out_image_dir and not os.path.exists(_A ): os.makedirs(_A ) UpperCAmelCase__ : List[str] = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return UpperCAmelCase__ : Dict = make_precision_recall_eval( _A , _A , _A , _A , out_image=os.path.join(_A , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , ) UpperCAmelCase__ : Any = make_precision_recall_eval( _A , _A , _A , _A , out_image=os.path.join(_A , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , ) UpperCAmelCase__ : Tuple = {k: float(_A ) for k, v in qid_to_has_ans.items()} UpperCAmelCase__ : Any = make_precision_recall_eval( _A , _A , _A , _A , out_image=os.path.join(_A , '''pr_oracle.png''' ) 
, title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , ) merge_eval(_A , _A , '''pr_exact''' ) merge_eval(_A , _A , '''pr_f1''' ) merge_eval(_A , _A , '''pr_oracle''' ) def __UpperCamelCase( _A : Tuple , _A : Dict , _A : Dict , _A : Tuple ): '''simple docstring''' if not qid_list: return UpperCAmelCase__ : Optional[Any] = [na_probs[k] for k in qid_list] UpperCAmelCase__ : Union[str, Any] = np.ones_like(_A ) / float(len(_A ) ) plt.hist(_A , weights=_A , bins=20 , range=(0.0, 1.0) ) plt.xlabel('''Model probability of no-answer''' ) plt.ylabel('''Proportion of dataset''' ) plt.title(F'''Histogram of no-answer probability: {name}''' ) plt.savefig(os.path.join(_A , F'''na_prob_hist_{name}.png''' ) ) plt.clf() def __UpperCamelCase( _A : List[Any] , _A : List[str] , _A : Optional[int] , _A : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) UpperCAmelCase__ : List[str] = num_no_ans UpperCAmelCase__ : Any = cur_score UpperCAmelCase__ : List[str] = 0.0 UpperCAmelCase__ : Dict = sorted(_A , key=lambda _A : na_probs[k] ) for i, qid in enumerate(_A ): if qid not in scores: continue if qid_to_has_ans[qid]: UpperCAmelCase__ : int = scores[qid] else: if preds[qid]: UpperCAmelCase__ : Any = -1 else: UpperCAmelCase__ : Dict = 0 cur_score += diff if cur_score > best_score: UpperCAmelCase__ : Optional[Any] = cur_score UpperCAmelCase__ : Tuple = na_probs[qid] return 1_0_0.0 * best_score / len(_A ), best_thresh def __UpperCamelCase( _A : str , _A : str , _A : int , _A : int , _A : Tuple , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = find_best_thresh(_A , _A , _A , _A ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = find_best_thresh(_A , _A , _A , _A ) UpperCAmelCase__ : List[str] = best_exact UpperCAmelCase__ : Any = exact_thresh UpperCAmelCase__ : Dict = best_fa UpperCAmelCase__ : Dict = fa_thresh def __UpperCamelCase( ): '''simple docstring''' with open(OPTS.data_file ) as f: UpperCAmelCase__ : Dict = json.load(_A ) UpperCAmelCase__ : str = dataset_json['''data'''] with open(OPTS.pred_file ) as f: UpperCAmelCase__ : Any = json.load(_A ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: UpperCAmelCase__ : Optional[Any] = json.load(_A ) else: UpperCAmelCase__ : Dict = {k: 0.0 for k in preds} UpperCAmelCase__ : int = make_qid_to_has_ans(_A ) # maps qid to True/False UpperCAmelCase__ : Any = [k for k, v in qid_to_has_ans.items() if v] UpperCAmelCase__ : Optional[Any] = [k for k, v in qid_to_has_ans.items() if not v] UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = get_raw_scores(_A , _A ) UpperCAmelCase__ : Optional[int] = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh ) UpperCAmelCase__ : Dict = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh ) UpperCAmelCase__ : Union[str, Any] = make_eval_dict(_A , _A ) if has_ans_qids: UpperCAmelCase__ : Optional[int] = make_eval_dict(_A , _A , qid_list=_A ) merge_eval(_A , _A , '''HasAns''' ) if no_ans_qids: UpperCAmelCase__ : Dict = make_eval_dict(_A , _A , qid_list=_A ) merge_eval(_A , _A , '''NoAns''' ) if OPTS.na_prob_file: find_all_best_thresh(_A , _A , _A , _A , _A , _A ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(_A , _A , _A , _A , _A , OPTS.out_image_dir ) histogram_na_prob(_A , _A , OPTS.out_image_dir , '''hasAns''' ) histogram_na_prob(_A , _A , OPTS.out_image_dir , '''noAns''' ) if OPTS.out_file: with open(OPTS.out_file , '''w''' ) as f: json.dump(_A , _A ) else: 
print(json.dumps(_A , indent=2 ) ) if __name__ == "__main__": UpperCamelCase__ : Tuple = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt main()
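# A sketch of driving the scoring helpers above directly, bypassing the CLI.
# It assumes the helpers carry their conventional upstream names
# (make_qid_to_has_ans, get_raw_scores, make_eval_dict); `dataset` follows the
# SQuAD 2.0 JSON layout and `preds` maps question id -> predicted answer text.
# (From the shell, the same flow is: python <this_script>.py data.json pred.json)
dataset = [
    {
        "paragraphs": [
            {
                "qas": [
                    {"id": "q1", "answers": {"text": ["Paris"]}},
                    {"id": "q2", "answers": {"text": []}},  # unanswerable question
                ]
            }
        ]
    }
]
preds = {"q1": "paris", "q2": ""}

qid_to_has_ans = make_qid_to_has_ans(dataset)  # {"q1": True, "q2": False}
exact_raw, f1_raw = get_raw_scores(dataset, preds)
print(make_eval_dict(exact_raw, f1_raw))
# OrderedDict([('exact', 100.0), ('f1', 100.0), ('total', 2)])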
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
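# A short usage sketch: the frozen dataclass just records which dataset column
# holds raw text and how to rename it for the task (it cannot run standalone
# because of the relative imports above).
task = LanguageModeling(text_column="content")
print(task.task)            # "language-modeling"
print(task.column_mapping)  # {"content": "text"}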
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """Return the zero-based index of the lowest set bit of ``number``."""
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    # number & -number isolates the lowest set bit; log2 of that power of two is exact
    return 0 if number == 0 else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
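# Worked examples of the bit trick: in two's complement, number & -number
# keeps only the lowest set bit, so log2 of the result is that bit's index.
for n in (1, 2, 3, 8, 36):
    print(n, bin(n), lowest_set_bit_index(n))
# 1  0b1       0
# 2  0b10      1
# 3  0b11      0
# 8  0b1000    3
# 36 0b100100  2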
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input

description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
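# Programmatic sketch of what the `accelerate config` console entry point does
# when given an explicit path (the file name here is a placeholder):
parser = config_command_parser()
args = parser.parse_args(["--config_file", "my_config.yaml"])
config_command(args)  # prompts interactively, then writes my_config.yaml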
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import StableDiffusionKDiffusionPipeline from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() @slow @require_torch_gpu class SCREAMING_SNAKE_CASE ( unittest.TestCase ): def a_ ( self : Union[str, Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase : Dict = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" ) __lowerCamelCase : Tuple = sd_pipe.to(A__ ) sd_pipe.set_progress_bar_config(disable=A__ ) sd_pipe.set_scheduler("""sample_euler""" ) __lowerCamelCase : Any = """A painting of a squirrel eating a burger""" __lowerCamelCase : List[Any] = torch.manual_seed(0 ) __lowerCamelCase : Dict = sd_pipe([prompt] , generator=A__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) __lowerCamelCase : Tuple = output.images __lowerCamelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowerCamelCase : List[str] = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def a_ ( self : Dict ): """simple docstring""" __lowerCamelCase : Any = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) __lowerCamelCase : int = sd_pipe.to(A__ ) sd_pipe.set_progress_bar_config(disable=A__ ) sd_pipe.set_scheduler("""sample_euler""" ) __lowerCamelCase : List[Any] = """A painting of a squirrel eating a burger""" __lowerCamelCase : Union[str, Any] = torch.manual_seed(0 ) __lowerCamelCase : int = sd_pipe([prompt] , generator=A__ , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" ) __lowerCamelCase : List[str] = output.images __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowerCamelCase : Optional[int] = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1 def a_ ( self : str ): """simple docstring""" __lowerCamelCase : List[str] = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" ) __lowerCamelCase : List[str] = sd_pipe.to(A__ ) sd_pipe.set_progress_bar_config(disable=A__ ) sd_pipe.set_scheduler("""sample_dpmpp_2m""" ) __lowerCamelCase : int = """A painting of a squirrel eating a burger""" __lowerCamelCase : Tuple = torch.manual_seed(0 ) __lowerCamelCase : Union[str, Any] = sd_pipe( [prompt] , generator=A__ , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=A__ , ) __lowerCamelCase : int = output.images __lowerCamelCase : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __lowerCamelCase : int = np.array( [0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
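# Example run; when several non-decreasing subsequences tie on length, the
# recursion order decides which one is returned.
print(longest_subsequence([1, 3, 2, 4]))  # [1, 2, 4]  (same length as [1, 3, 4])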
from __future__ import annotations import math def _A (UpperCamelCase : list , UpperCamelCase : list ) ->list: '''simple docstring''' if len(UpperCamelCase ) != 2 or len(a[0] ) != 2 or len(UpperCamelCase ) != 2 or len(b[0] ) != 2: raise Exception("""Matrices are not 2x2""" ) lowerCamelCase__ : Union[str, Any] = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def _A (UpperCamelCase : list , UpperCamelCase : list ) ->int: '''simple docstring''' return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase ) ) ] def _A (UpperCamelCase : list , UpperCamelCase : list ) ->Tuple: '''simple docstring''' return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(UpperCamelCase ) ) ] def _A (UpperCamelCase : list ) ->tuple[list, list, list, list]: '''simple docstring''' if len(UpperCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("""Odd matrices are not supported!""" ) lowerCamelCase__ : List[Any] = len(UpperCamelCase ) lowerCamelCase__ : Tuple = matrix_length // 2 lowerCamelCase__ : Tuple = [[a[i][j] for j in range(UpperCamelCase , UpperCamelCase )] for i in range(UpperCamelCase )] lowerCamelCase__ : Optional[int] = [ [a[i][j] for j in range(UpperCamelCase , UpperCamelCase )] for i in range(UpperCamelCase , UpperCamelCase ) ] lowerCamelCase__ : Union[str, Any] = [[a[i][j] for j in range(UpperCamelCase )] for i in range(UpperCamelCase )] lowerCamelCase__ : int = [[a[i][j] for j in range(UpperCamelCase )] for i in range(UpperCamelCase , UpperCamelCase )] return top_left, top_right, bot_left, bot_right def _A (UpperCamelCase : list ) ->tuple[int, int]: '''simple docstring''' return len(UpperCamelCase ), len(matrix[0] ) def _A (UpperCamelCase : list ) ->None: '''simple docstring''' print("""\n""".join(str(UpperCamelCase ) for line in matrix ) ) def _A (UpperCamelCase : list , UpperCamelCase : list ) ->list: '''simple docstring''' if matrix_dimensions(UpperCamelCase ) == (2, 2): return default_matrix_multiplication(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : Optional[Any] = split_matrix(UpperCamelCase ) lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ : Tuple = split_matrix(UpperCamelCase ) lowerCamelCase__ : Optional[Any] = actual_strassen(UpperCamelCase , matrix_subtraction(UpperCamelCase , UpperCamelCase ) ) lowerCamelCase__ : Any = actual_strassen(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) lowerCamelCase__ : Tuple = actual_strassen(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) lowerCamelCase__ : Optional[Any] = actual_strassen(UpperCamelCase , matrix_subtraction(UpperCamelCase , UpperCamelCase ) ) lowerCamelCase__ : List[str] = actual_strassen(matrix_addition(UpperCamelCase , UpperCamelCase ) , matrix_addition(UpperCamelCase , UpperCamelCase ) ) lowerCamelCase__ : Dict = actual_strassen(matrix_subtraction(UpperCamelCase , UpperCamelCase ) , matrix_addition(UpperCamelCase , UpperCamelCase ) ) lowerCamelCase__ : Tuple = actual_strassen(matrix_subtraction(UpperCamelCase , UpperCamelCase ) , matrix_addition(UpperCamelCase , UpperCamelCase ) ) lowerCamelCase__ : Dict = matrix_addition(matrix_subtraction(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) , UpperCamelCase ) lowerCamelCase__ : 
Union[str, Any] = matrix_addition(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : Tuple = matrix_addition(UpperCamelCase , UpperCamelCase ) lowerCamelCase__ : str = matrix_subtraction(matrix_subtraction(matrix_addition(UpperCamelCase , UpperCamelCase ) , UpperCamelCase ) , UpperCamelCase ) # construct the new matrix from our 4 quadrants lowerCamelCase__ : int = [] for i in range(len(UpperCamelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(UpperCamelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def _A (UpperCamelCase : list , UpperCamelCase : list ) ->list: '''simple docstring''' if matrix_dimensions(UpperCamelCase )[1] != matrix_dimensions(UpperCamelCase )[0]: lowerCamelCase__ : List[str] = ( """Unable to multiply these matrices, please check the dimensions.\n""" f"Matrix A: {matrixa}\n" f"Matrix B: {matrixa}" ) raise Exception(UpperCamelCase ) lowerCamelCase__ : Optional[int] = matrix_dimensions(UpperCamelCase ) lowerCamelCase__ : List[str] = matrix_dimensions(UpperCamelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] lowerCamelCase__ : Optional[int] = max(*UpperCamelCase , *UpperCamelCase ) lowerCamelCase__ : str = int(math.pow(2 , math.ceil(math.loga(UpperCamelCase ) ) ) ) lowerCamelCase__ : Optional[Any] = matrixa lowerCamelCase__ : List[str] = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , UpperCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) lowerCamelCase__ : str = actual_strassen(UpperCamelCase , UpperCamelCase ) # Removing the additional zeros for i in range(0 , UpperCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , UpperCamelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _lowercase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _lowercase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
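# What the lazy structure buys on the consumer side (a sketch, run against an
# installed transformers): importing the package stays cheap, and the
# torch-backed symbols listed above only materialise on first attribute access.
from transformers import SwiftFormerConfig  # resolved through _LazyModule

config = SwiftFormerConfig()
print(config.model_type)  # "swiftformer"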
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
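# Bitonic sort assumes the slice length is a power of two at every level of
# the recursion, so only feed it inputs of length 2**k.
data = [12, 42, -21, 17, 23, 18, 9, -5]  # length 8 == 2**3
bitonic_sort(data, 0, len(data), 1)  # direction 1 == ascending
print(data)  # [-21, -5, 9, 12, 17, 18, 23, 42]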
from random import shuffle import tensorflow as tf from numpy import array def a__ ( snake_case__ : Tuple , snake_case__ : str ): _UpperCAmelCase : int = int(snake_case__ ) assert noofclusters < len(snake_case__ ) # Find out the dimensionality _UpperCAmelCase : Tuple = len(vectors[0] ) # Will help select random centroids from among the available vectors _UpperCAmelCase : List[Any] = list(range(len(snake_case__ ) ) ) shuffle(snake_case__ ) # GRAPH OF COMPUTATION # We initialize a new graph and set it as the default during each run # of this algorithm. This ensures that as this function is called # multiple times, the default graph doesn't keep getting crowded with # unused ops and Variables from previous function calls. _UpperCAmelCase : Optional[int] = tf.Graph() with graph.as_default(): # SESSION OF COMPUTATION _UpperCAmelCase : Dict = tf.Session() ##CONSTRUCTING THE ELEMENTS OF COMPUTATION ##First lets ensure we have a Variable vector for each centroid, ##initialized to one of the vectors from the available data points _UpperCAmelCase : Union[str, Any] = [ tf.Variable(vectors[vector_indices[i]] ) for i in range(snake_case__ ) ] ##These nodes will assign the centroid Variables the appropriate ##values _UpperCAmelCase : int = tf.placeholder("""float64""" , [dim] ) _UpperCAmelCase : List[str] = [] for centroid in centroids: cent_assigns.append(tf.assign(snake_case__ , snake_case__ ) ) ##Variables for cluster assignments of individual vectors(initialized ##to 0 at first) _UpperCAmelCase : List[str] = [tf.Variable(0 ) for i in range(len(snake_case__ ) )] ##These nodes will assign an assignment Variable the appropriate ##value _UpperCAmelCase : Optional[int] = tf.placeholder("""int32""" ) _UpperCAmelCase : List[Any] = [] for assignment in assignments: cluster_assigns.append(tf.assign(snake_case__ , snake_case__ ) ) ##Now lets construct the node that will compute the mean # The placeholder for the input _UpperCAmelCase : str = tf.placeholder("""float""" , [None, dim] ) # The Node/op takes the input and computes a mean along the 0th # dimension, i.e. the list of input vectors _UpperCAmelCase : Optional[int] = tf.reduce_mean(snake_case__ , 0 ) ##Node for computing Euclidean distances # Placeholders for input _UpperCAmelCase : str = tf.placeholder("""float""" , [dim] ) _UpperCAmelCase : str = tf.placeholder("""float""" , [dim] ) _UpperCAmelCase : str = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(snake_case__ , snake_case__ ) , 2 ) ) ) ##This node will figure out which cluster to assign a vector to, ##based on Euclidean distances of the vector from the centroids. # Placeholder for input _UpperCAmelCase : Optional[int] = tf.placeholder("""float""" , [noofclusters] ) _UpperCAmelCase : Dict = tf.argmin(snake_case__ , 0 ) ##INITIALIZING STATE VARIABLES ##This will help initialization of all Variables defined with respect ##to the graph. The Variable-initializer should be defined after ##all the Variables have been constructed, so that each of them ##will be included in the initialization. _UpperCAmelCase : List[Any] = tf.initialize_all_variables() # Initialize all variables sess.run(snake_case__ ) ##CLUSTERING ITERATIONS # Now perform the Expectation-Maximization steps of K-Means clustering # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. _UpperCAmelCase : Optional[Any] = 100 for _ in range(snake_case__ ): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. 
# Iterate over each vector for vector_n in range(len(snake_case__ ) ): _UpperCAmelCase : List[Any] = vectors[vector_n] # Compute Euclidean distance between this vector and each # centroid. Remember that this list cannot be named #'centroid_distances', since that is the input to the # cluster assignment node. _UpperCAmelCase : List[Any] = [ sess.run(snake_case__ , feed_dict={va: vect, va: sess.run(snake_case__ )} ) for centroid in centroids ] # Now use the cluster assignment node, with the distances # as the input _UpperCAmelCase : List[Any] = sess.run( snake_case__ , feed_dict={centroid_distances: distances} ) # Now assign the value to the appropriate state variable sess.run( cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} ) ##MAXIMIZATION STEP # Based on the expected state computed from the Expectation Step, # compute the locations of the centroids so as to maximize the # overall objective of minimizing within-cluster Sum-of-Squares for cluster_n in range(snake_case__ ): # Collect all the vectors assigned to this cluster _UpperCAmelCase : Tuple = [ vectors[i] for i in range(len(snake_case__ ) ) if sess.run(assignments[i] ) == cluster_n ] # Compute new centroid location _UpperCAmelCase : Union[str, Any] = sess.run( snake_case__ , feed_dict={mean_input: array(snake_case__ )} ) # Assign value to appropriate variable sess.run( cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} ) # Return centroids and assignments _UpperCAmelCase : Union[str, Any] = sess.run(snake_case__ ) _UpperCAmelCase : Union[str, Any] = sess.run(snake_case__ ) return centroids, assignments
ENERGY_CONVERSION: dict[str, float] = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
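# Spot checks of the joule-pivot conversion above: every value is first taken
# to joules, then divided by the target unit's factor.
print(energy_conversion("kilowatthour", "joule", 1.0))            # 3600000.0
print(energy_conversion("joule", "kilocalorie_nutr", 4_186_800))  # 1.0
print(energy_conversion("wattsecond", "watthour", 3_600))         # 1.0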
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """Memory-function (memoised, top-down) 0/1 knapsack; relies on the global table f."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1, wt, val, j)
        else:
            best = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = best
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal subset of items."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
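# A second worked example for the reconstruction helper: item indices in the
# returned subset are 1-based, matching the DP table above.
value, subset = knapsack_with_example_solution(w=10, wt=[3, 3, 5, 6], val=[10, 20, 100, 110])
print(value)   # 130  (items 2 and 4: 20 + 110, total weight 9 <= 10)
print(subset)  # {2, 4}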
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCamelCase: def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=[30, 30], lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=37, lowerCamelCase="gelu", lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=10, lowerCamelCase=0.0_2, lowerCamelCase=3, lowerCamelCase=None, lowerCamelCase=8, lowerCamelCase=10, ) -> Union[str, Any]: """simple docstring""" _lowercase : Optional[Any] = parent _lowercase : int = batch_size _lowercase : str = image_size _lowercase : Any = patch_size _lowercase : Optional[Any] = num_channels _lowercase : Union[str, Any] = is_training _lowercase : Dict = use_labels _lowercase : Optional[Any] = hidden_size _lowercase : Optional[int] = num_hidden_layers _lowercase : List[str] = num_attention_heads _lowercase : Optional[Any] = intermediate_size _lowercase : Tuple = hidden_act _lowercase : Union[str, Any] = hidden_dropout_prob _lowercase : str = attention_probs_dropout_prob _lowercase : int = type_sequence_label_size _lowercase : str = initializer_range _lowercase : Tuple = num_labels _lowercase : Any = scope _lowercase : Optional[Any] = n_targets _lowercase : List[Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens _lowercase : Tuple = (image_size[1] // patch_size) * (image_size[0] // patch_size) _lowercase : str = num_patches + 1 + self.num_detection_tokens def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) _lowercase : str = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) _lowercase : Optional[Any] = [] for i in range(self.batch_size): _lowercase : Tuple = {} _lowercase : Dict = torch.randint( high=self.num_labels, size=(self.n_targets,), device=lowerCamelCase) _lowercase : str = torch.rand(self.n_targets, 4, device=lowerCamelCase) labels.append(lowerCamelCase) _lowercase : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self) -> List[Any]: """simple docstring""" return YolosConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCamelCase, initializer_range=self.initializer_range, 
num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, ) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Tuple: """simple docstring""" _lowercase : Dict = YolosModel(config=lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : Optional[int] = model(lowerCamelCase) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[Any]: """simple docstring""" _lowercase : Optional[int] = YolosForObjectDetection(lowerCamelCase) model.to(lowerCamelCase) model.eval() _lowercase : List[Any] = model(pixel_values=lowerCamelCase) _lowercase : Union[str, Any] = model(lowerCamelCase) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) _lowercase : Tuple = model(pixel_values=lowerCamelCase, labels=lowerCamelCase) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : int = self.prepare_config_and_inputs() _lowercase , _lowercase , _lowercase : Dict = config_and_inputs _lowercase : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class _lowerCamelCase( _a, _a, unittest.TestCase ): lowercase_ : int = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowercase_ : Optional[Any] = ( {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {} ) lowercase_ : Tuple = False lowercase_ : Optional[Any] = False lowercase_ : Tuple = False lowercase_ : Optional[Any] = False def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> str: """simple docstring""" _lowercase : List[Any] = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase) if return_labels: if model_class.__name__ == "YolosForObjectDetection": _lowercase : Dict = [] for i in range(self.model_tester.batch_size): _lowercase : List[Any] = {} _lowercase : str = torch.ones( size=(self.model_tester.n_targets,), device=lowerCamelCase, dtype=torch.long) _lowercase : List[str] = torch.ones( self.model_tester.n_targets, 4, device=lowerCamelCase, dtype=torch.float) labels.append(lowerCamelCase) _lowercase : Optional[int] = labels return inputs_dict def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : str = YolosModelTester(self) _lowercase : int = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37) def UpperCamelCase ( self) -> int: """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self) -> int: """simple docstring""" pass def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Union[str, Any] = model_class(lowerCamelCase) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) _lowercase : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase, nn.Linear)) def 
UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase , _lowercase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowercase : Optional[int] = model_class(lowerCamelCase) _lowercase : Optional[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowercase : Union[str, Any] = [*signature.parameters.keys()] _lowercase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1], lowerCamelCase) def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase) def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : List[str] = True # in YOLOS, the seq_len is different _lowercase : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: _lowercase : Optional[Any] = True _lowercase : str = False _lowercase : Tuple = True _lowercase : Tuple = model_class(lowerCamelCase) model.to(lowerCamelCase) model.eval() with torch.no_grad(): _lowercase : int = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase)) _lowercase : Optional[int] = outputs.attentions self.assertEqual(len(lowerCamelCase), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] _lowercase : int = True _lowercase : Tuple = model_class(lowerCamelCase) model.to(lowerCamelCase) model.eval() with torch.no_grad(): _lowercase : Any = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase)) _lowercase : str = outputs.attentions self.assertEqual(len(lowerCamelCase), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) _lowercase : Optional[Any] = len(lowerCamelCase) # Check attention is always last and order is fine _lowercase : List[str] = True _lowercase : Union[str, Any] = True _lowercase : Any = model_class(lowerCamelCase) model.to(lowerCamelCase) model.eval() with torch.no_grad(): _lowercase : Dict = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase)) _lowercase : Dict = 1 self.assertEqual(out_len + added_hidden_states, len(lowerCamelCase)) _lowercase : Any = outputs.attentions self.assertEqual(len(lowerCamelCase), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase): _lowercase : Tuple = model_class(lowerCamelCase) model.to(lowerCamelCase) model.eval() with torch.no_grad(): _lowercase : Optional[Any] = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase)) _lowercase : int = outputs.hidden_states _lowercase : Dict = getattr( self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1) self.assertEqual(len(lowerCamelCase), lowerCamelCase) # YOLOS has a different seq_length _lowercase : List[str] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) _lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _lowercase : Any = True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowercase : Union[str, Any] = True check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> Dict: """simple docstring""" _lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*lowerCamelCase) @slow def UpperCamelCase ( self) -> Dict: """simple docstring""" for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowercase : Optional[Any] = YolosModel.from_pretrained(lowerCamelCase) self.assertIsNotNone(lowerCamelCase) def UpperCamelCase_( ) -> List[str]: _lowercase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class _lowerCamelCase( unittest.TestCase ): @cached_property def UpperCamelCase ( self) -> Dict: """simple docstring""" return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None @slow def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : List[str] = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(lowerCamelCase) _lowercase : int = self.default_image_processor _lowercase : List[Any] = prepare_img() _lowercase : str = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase) # forward pass with torch.no_grad(): _lowercase : str = model(inputs.pixel_values) # verify outputs _lowercase : Optional[int] = torch.Size((1, 1_00, 92)) self.assertEqual(outputs.logits.shape, lowerCamelCase) _lowercase : Tuple = torch.tensor( [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]], device=lowerCamelCase, ) _lowercase : Dict = torch.tensor( [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]], device=lowerCamelCase) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCamelCase, atol=1E-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCamelCase, atol=1E-4)) # verify postprocessing _lowercase : str = image_processor.post_process_object_detection( lowerCamelCase, threshold=0.3, target_sizes=[image.size[::-1]])[0] _lowercase : Union[str, Any] = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1]).to(lowerCamelCase) _lowercase : Optional[Any] = [75, 75, 17, 63, 17] _lowercase : Union[str, Any] = torch.tensor([3_3_5.0_6_0_9, 7_9.3_8_4_8, 3_7_5.4_2_1_6, 1_8_7.2_4_9_5]).to(lowerCamelCase) self.assertEqual(len(results['scores']), 5) self.assertTrue(torch.allclose(results['scores'], lowerCamelCase, atol=1E-4)) self.assertSequenceEqual(results['labels'].tolist(), lowerCamelCase) self.assertTrue(torch.allclose(results['boxes'][0, :], lowerCamelCase))
"""simple docstring""" import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging A_ = logging.get_logger(__name__) def _UpperCamelCase ( A , A ): UpperCamelCase_ =nn.functional.normalize(A ) UpperCamelCase_ =nn.functional.normalize(A ) return torch.mm(A , normalized_text_embeds.t() ) class __lowerCAmelCase ( UpperCAmelCase ): '''simple docstring''' __lowerCamelCase : Dict = CLIPConfig __lowerCamelCase : List[str] = ["CLIPEncoderLayer"] def __init__( self: List[Any] , UpperCamelCase_: CLIPConfig ): super().__init__(UpperCamelCase_ ) UpperCamelCase_ =CLIPVisionModel(config.vision_config ) UpperCamelCase_ =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=UpperCamelCase_ ) UpperCamelCase_ =nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=UpperCamelCase_ ) UpperCamelCase_ =nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=UpperCamelCase_ ) UpperCamelCase_ =nn.Parameter(torch.ones(17 ) , requires_grad=UpperCamelCase_ ) UpperCamelCase_ =nn.Parameter(torch.ones(3 ) , requires_grad=UpperCamelCase_ ) @torch.no_grad() def UpperCamelCase__ ( self: Optional[Any] , UpperCamelCase_: Optional[int] , UpperCamelCase_: Union[str, Any] ): UpperCamelCase_ =self.vision_model(UpperCamelCase_ )[1] # pooled_output UpperCamelCase_ =self.visual_projection(UpperCamelCase_ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCamelCase_ =cosine_distance(UpperCamelCase_ , self.special_care_embeds ).cpu().float().numpy() UpperCamelCase_ =cosine_distance(UpperCamelCase_ , self.concept_embeds ).cpu().float().numpy() UpperCamelCase_ =[] UpperCamelCase_ =image_embeds.shape[0] for i in range(UpperCamelCase_ ): UpperCamelCase_ ={"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase_ =0.0 for concept_idx in range(len(special_cos_dist[0] ) ): UpperCamelCase_ =special_cos_dist[i][concept_idx] UpperCamelCase_ =self.special_care_embeds_weights[concept_idx].item() UpperCamelCase_ =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} ) UpperCamelCase_ =0.01 for concept_idx in range(len(cos_dist[0] ) ): UpperCamelCase_ =cos_dist[i][concept_idx] UpperCamelCase_ =self.concept_embeds_weights[concept_idx].item() UpperCamelCase_ =round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(UpperCamelCase_ ) result.append(UpperCamelCase_ ) UpperCamelCase_ =[len(res["bad_concepts"] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCamelCase__ ( self: Tuple , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor ): UpperCamelCase_ =self.vision_model(UpperCamelCase_ )[1] # pooled_output UpperCamelCase_ =self.visual_projection(UpperCamelCase_ ) UpperCamelCase_ =cosine_distance(UpperCamelCase_ , self.special_care_embeds ) UpperCamelCase_ =cosine_distance(UpperCamelCase_ , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images UpperCamelCase_ =0.0 UpperCamelCase_ =special_cos_dist - self.special_care_embeds_weights + 
adjustment # special_scores = special_scores.round(decimals=3) UpperCamelCase_ =torch.any(special_scores > 0 , dim=1 ) UpperCamelCase_ =special_care * 0.01 UpperCamelCase_ =special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) UpperCamelCase_ =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) UpperCamelCase_ =torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
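The screening above boils down to a cosine-similarity threshold test against a bank of concept embeddings. A minimal standalone sketch of that test, using random placeholder embeddings and a made-up threshold rather than the shipped CLIP concept vectors:

import torch
import torch.nn as nn

torch.manual_seed(0)
image_embeds = torch.randn(2, 768)           # two projected image embeddings (placeholder)
concept_embeds = torch.randn(17, 768)        # 17 concept vectors (placeholder)
concept_thresholds = torch.full((17,), 0.2)  # per-concept thresholds (placeholder)

# same cosine_distance as above: normalize, then matrix-multiply
cos = torch.mm(nn.functional.normalize(image_embeds), nn.functional.normalize(concept_embeds).t())
scores = cos - concept_thresholds            # positive score => concept triggered
has_nsfw = torch.any(scores > 0, dim=1)
print(has_nsfw.tolist())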
391
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class __lowerCAmelCase : '''simple docstring''' def __init__( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, Any]=13 , UpperCamelCase_: List[str]=7 , UpperCamelCase_: str=6 , UpperCamelCase_: Tuple=17 , UpperCamelCase_: str=23 , UpperCamelCase_: List[str]=11 , UpperCamelCase_: List[str]=True , ): UpperCamelCase_ =parent UpperCamelCase_ =batch_size UpperCamelCase_ =seq_length UpperCamelCase_ =act_dim UpperCamelCase_ =state_dim UpperCamelCase_ =hidden_size UpperCamelCase_ =max_length UpperCamelCase_ =is_training def UpperCamelCase__ ( self: str ): UpperCamelCase_ =floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCamelCase_ =floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCamelCase_ =floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCamelCase_ =floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCamelCase_ =ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 ) UpperCamelCase_ =random_attention_mask((self.batch_size, self.seq_length) ) UpperCamelCase_ =self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def UpperCamelCase__ ( self: Tuple ): return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def UpperCamelCase__ ( self: List[str] , UpperCamelCase_: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int] , ): UpperCamelCase_ =DecisionTransformerModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCamelCase_ =model(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def UpperCamelCase__ ( self: List[Any] ): UpperCamelCase_ =self.prepare_config_and_inputs() ( ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ( UpperCamelCase_ ) , ) =config_and_inputs UpperCamelCase_ ={ "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class __lowerCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): 
'''simple docstring''' __lowerCamelCase : Dict = (DecisionTransformerModel,) if is_torch_available() else () __lowerCamelCase : Dict = () __lowerCamelCase : List[Any] = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids __lowerCamelCase : List[str] = False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features __lowerCamelCase : List[str] = False __lowerCamelCase : List[str] = False __lowerCamelCase : Dict = False __lowerCamelCase : int = False __lowerCamelCase : int = False __lowerCamelCase : List[str] = False __lowerCamelCase : int = False __lowerCamelCase : Tuple = False __lowerCamelCase : Optional[Any] = False def UpperCamelCase__ ( self: str ): UpperCamelCase_ =DecisionTransformerModelTester(self ) UpperCamelCase_ =ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 ) def UpperCamelCase__ ( self: Optional[Any] ): self.config_tester.run_common_tests() def UpperCamelCase__ ( self: Tuple ): UpperCamelCase_ =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) @slow def UpperCamelCase__ ( self: Any ): for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase_ =DecisionTransformerModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def UpperCamelCase__ ( self: Union[str, Any] ): UpperCamelCase_ , UpperCamelCase_ =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase_ =model_class(UpperCamelCase_ ) UpperCamelCase_ =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase_ =[*signature.parameters.keys()] UpperCamelCase_ =[ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(UpperCamelCase_ )] , UpperCamelCase_ ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ ( self: Optional[int] ): UpperCamelCase_ =2 # number of steps of autoregressive prediction we will perform UpperCamelCase_ =10 # defined by the RL environment, may be normalized UpperCamelCase_ =DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCamelCase_ =model.to(UpperCamelCase_ ) UpperCamelCase_ =model.config torch.manual_seed(0 ) UpperCamelCase_ =torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ) # env.reset() UpperCamelCase_ =torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=UpperCamelCase_ ) UpperCamelCase_ =torch.tensor(UpperCamelCase_ , device=UpperCamelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCamelCase_ =state UpperCamelCase_ =torch.zeros(1 , 0 , config.act_dim , device=UpperCamelCase_ , dtype=torch.floataa ) UpperCamelCase_ =torch.zeros(1 , 0 , device=UpperCamelCase_ , dtype=torch.floataa ) UpperCamelCase_ =torch.tensor(0 , device=UpperCamelCase_ , dtype=torch.long ).reshape(1 , 1 ) for step in range(UpperCamelCase_ ): UpperCamelCase_ =torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=UpperCamelCase_ )] , dim=1 ) UpperCamelCase_ =torch.cat([rewards, torch.zeros(1 , 1 , device=UpperCamelCase_ )] , dim=1 ) UpperCamelCase_ =torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with 
torch.no_grad(): UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =model( states=UpperCamelCase_ , actions=UpperCamelCase_ , rewards=UpperCamelCase_ , returns_to_go=UpperCamelCase_ , timesteps=UpperCamelCase_ , attention_mask=UpperCamelCase_ , return_dict=UpperCamelCase_ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=UpperCamelCase_ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCamelCase_ =action_pred[0, -1] UpperCamelCase_ =torch.cat([states, state] , dim=1 ) UpperCamelCase_ =returns_to_go[0, -1] - reward UpperCamelCase_ =torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCamelCase_ =torch.cat( [timesteps, torch.ones((1, 1) , device=UpperCamelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
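The shape check on last_hidden_state, (batch, seq_length * 3, hidden), follows from interleaving the three modalities into one token stream. A shape-only sketch of that interleaving; the (return, state, action) ordering follows the Decision Transformer paper and is an assumption, not something the test above verifies:

import torch
import torch.nn as nn

batch_size, seq_length, state_dim, act_dim, hidden = 2, 5, 17, 6, 64
embed_state = nn.Linear(state_dim, hidden)   # placeholder per-modality projections
embed_action = nn.Linear(act_dim, hidden)
embed_return = nn.Linear(1, hidden)

states = torch.randn(batch_size, seq_length, state_dim)
actions = torch.randn(batch_size, seq_length, act_dim)
returns_to_go = torch.randn(batch_size, seq_length, 1)

# interleave as (R_1, s_1, a_1, R_2, s_2, a_2, ...)
tokens = torch.stack(
    [embed_return(returns_to_go), embed_state(states), embed_action(actions)], dim=2
).reshape(batch_size, seq_length * 3, hidden)
print(tokens.shape)  # torch.Size([2, 15, 64])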
391
1
"""Breadth-first search and bidirectional breadth-first search on a 2D grid."""
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print('Unidirectional BFS computation time : ', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print('Bidirectional BFS computation time : ', bd_bfs_time)
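The frontier above is a plain list, so node_queue.pop(0) costs O(n) per dequeue. A minimal alternative sketch using collections.deque, which makes each dequeue O(1) while visiting the same cells:

from collections import deque

def bfs_shortest_path(search_grid, start, goal):
    # search_grid uses the same convention as `grid` above: 0 free, 1 obstacle
    rows, cols = len(search_grid), len(search_grid[0])
    queue = deque([(start, [start])])
    seen = {start}
    while queue:
        (y, x), path = queue.popleft()
        if (y, x) == goal:
            return path
        for dy, dx in delta:  # reuses the module-level moves defined above
            ny, nx = y + dy, x + dx
            if 0 <= ny < rows and 0 <= nx < cols and search_grid[ny][nx] == 0 and (ny, nx) not in seen:
                seen.add((ny, nx))
                queue.append(((ny, nx), path + [(ny, nx)]))
    return None

# e.g. bfs_shortest_path(grid, (0, 0), (6, 6))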
287
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a= logging.get_logger(__name__) a= { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } a= { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } a= {'''facebook/blenderbot-3B''': 1_2_8} class __lowercase ( _lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = BlenderbotTokenizer def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ): super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) __UpperCamelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space: __UpperCamelCase : Any = getattr(_lowerCamelCase , pre_tok_state.pop('type' ) ) __UpperCamelCase : Dict = add_prefix_space __UpperCamelCase : Optional[Any] = pre_tok_class(**_lowerCamelCase ) __UpperCamelCase : str = add_prefix_space __UpperCamelCase : Optional[int] = 'post_processor' __UpperCamelCase : Tuple = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: __UpperCamelCase : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __UpperCamelCase : List[Any] = tuple(state['sep'] ) if "cls" in state: __UpperCamelCase : str = tuple(state['cls'] ) __UpperCamelCase : Tuple = False if state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space: __UpperCamelCase : Dict = add_prefix_space __UpperCamelCase : str = True if state.get('trim_offsets' , _lowerCamelCase ) != trim_offsets: __UpperCamelCase : int = trim_offsets __UpperCamelCase : Any = True if changes_to_apply: __UpperCamelCase : Dict = getattr(_lowerCamelCase , state.pop('type' ) ) __UpperCamelCase : Any = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , 
_lowerCamelCase , _lowerCamelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def lowerCAmelCase ( self ): if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : int = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value __UpperCamelCase : Optional[Any] = value def lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ): __UpperCamelCase : Dict = kwargs.get('is_split_into_words' , _lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ): __UpperCamelCase : Dict = kwargs.get('is_split_into_words' , _lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ): __UpperCamelCase : List[str] = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ): __UpperCamelCase : Union[str, Any] = [self.sep_token_id] __UpperCamelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ): return token_ids_a + [self.eos_token_id] def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __UpperCamelCase : Union[str, Any] = ' '.join(_lowerCamelCase ) __UpperCamelCase : Union[str, Any] = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __UpperCamelCase : Tuple = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
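The conversation-encoding method at the end keeps only the most recent tokens once the encoded history exceeds model_max_length. The same rule in isolation, with made-up token ids:

def truncate_to_max_length(input_ids, model_max_length):
    # keep the newest tokens, dropping the oldest conversational context
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids

print(truncate_to_max_length(list(range(10)), 4))  # [6, 7, 8, 9]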
287
1
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get
        # previous sample: x_{t-1} ~ N(pred_prev_sample, variance)
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
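A schematic reverse-diffusion loop showing how a scheduler like the one above is typically driven: set_timesteps() once, then repeated step() calls walking the timesteps in descending order. The zero tensor stands in for a trained model's noise prediction; this is an illustration, not the unCLIP pipeline:

import torch

def denoise(scheduler, shape=(1, 3, 8, 8), num_inference_steps=5):
    sample = torch.randn(shape)              # start from pure noise
    scheduler.set_timesteps(num_inference_steps)
    for t in scheduler.timesteps:
        model_output = torch.zeros(shape)    # stand-in for the predicted epsilon
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample

# e.g. denoise(UnCLIPScheduler())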
74
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
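For reference, the copy marker parsed by the first regex above looks like this; the three captured groups are the indent, the dotted path of the source object, and an optional replacement pattern:

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")

line = "    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler with DDPM->UnCLIP"
indent, object_name, replace_pattern = _re_copy_warning.search(line).groups()
print(repr(indent), object_name, repr(replace_pattern))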
74
1
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law, F = k * q1 * q2 / d^2, solving for whichever of the
    four arguments is given as 0. Exactly one argument must be 0.
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
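Worked example: two 1 C charges held 1 m apart, then solving back for the first charge from the resulting force:

print(coulombs_law(0, 1, 1, 1))        # {'force': 8988000000.0}
print(coulombs_law(8.988e9, 0, 1, 1))  # {'charge1': 1.0}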
717
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) _UpperCamelCase: int =logging.getLogger(__name__) def _a ( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" _lowerCAmelCase = np.argmax(__SCREAMING_SNAKE_CASE , axis=1 ) return np.sum(outputs == labels ) def _a ( __SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" with open(__SCREAMING_SNAKE_CASE , encoding='utf_8' ) as f: _lowerCAmelCase = csv.reader(__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = [] next(__SCREAMING_SNAKE_CASE ) # skip the first line for line in tqdm(__SCREAMING_SNAKE_CASE ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _a ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" _lowerCAmelCase = [] for dataset in encoded_datasets: _lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) _lowerCAmelCase = np.zeros((n_batch, 2) , dtype=np.intaa ) _lowerCAmelCase = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) _lowerCAmelCase = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(__SCREAMING_SNAKE_CASE ): _lowerCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _lowerCAmelCase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] _lowerCAmelCase = with_conta _lowerCAmelCase = with_conta _lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) - 1 _lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) - 1 _lowerCAmelCase = with_conta _lowerCAmelCase = with_conta _lowerCAmelCase = mc_label _lowerCAmelCase = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(__SCREAMING_SNAKE_CASE ) for t in all_inputs ) ) return tensor_datasets def _a ( ): """simple docstring""" _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('--model_name' , type=__SCREAMING_SNAKE_CASE , default='openai-gpt' , help='pretrained model name' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir' , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' 
, ) parser.add_argument('--train_dataset' , type=__SCREAMING_SNAKE_CASE , default='' ) parser.add_argument('--eval_dataset' , type=__SCREAMING_SNAKE_CASE , default='' ) parser.add_argument('--seed' , type=__SCREAMING_SNAKE_CASE , default=42 ) parser.add_argument('--num_train_epochs' , type=__SCREAMING_SNAKE_CASE , default=3 ) parser.add_argument('--train_batch_size' , type=__SCREAMING_SNAKE_CASE , default=8 ) parser.add_argument('--eval_batch_size' , type=__SCREAMING_SNAKE_CASE , default=16 ) parser.add_argument('--adam_epsilon' , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , type=__SCREAMING_SNAKE_CASE , default=1 ) parser.add_argument( '--max_steps' , default=-1 , type=__SCREAMING_SNAKE_CASE , help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' ) , ) parser.add_argument( '--gradient_accumulation_steps' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--learning_rate' , type=__SCREAMING_SNAKE_CASE , default=6.25E-5 ) parser.add_argument('--warmup_steps' , default=0 , type=__SCREAMING_SNAKE_CASE , help='Linear warmup over warmup_steps.' ) parser.add_argument('--lr_schedule' , type=__SCREAMING_SNAKE_CASE , default='warmup_linear' ) parser.add_argument('--weight_decay' , type=__SCREAMING_SNAKE_CASE , default=0.0_1 ) parser.add_argument('--lm_coef' , type=__SCREAMING_SNAKE_CASE , default=0.9 ) parser.add_argument('--n_valid' , type=__SCREAMING_SNAKE_CASE , default=374 ) parser.add_argument('--server_ip' , type=__SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=__SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' ) _lowerCAmelCase = parser.parse_args() print(__SCREAMING_SNAKE_CASE ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=__SCREAMING_SNAKE_CASE ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) _lowerCAmelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _lowerCAmelCase = torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' 
) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset _lowerCAmelCase = ['_start_', '_delimiter_', '_classify_'] _lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(__SCREAMING_SNAKE_CASE ) ) model.to(__SCREAMING_SNAKE_CASE ) # Load and encode the datasets def tokenize_and_encode(__SCREAMING_SNAKE_CASE : str ): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return obj return [tokenize_and_encode(__SCREAMING_SNAKE_CASE ) for o in obj] logger.info('Encoding dataset...' ) _lowerCAmelCase = load_rocstories_dataset(args.train_dataset ) _lowerCAmelCase = load_rocstories_dataset(args.eval_dataset ) _lowerCAmelCase = (train_dataset, eval_dataset) _lowerCAmelCase = tokenize_and_encode(__SCREAMING_SNAKE_CASE ) # Compute the max input length for the Transformer _lowerCAmelCase = model.config.n_positions // 2 - 2 _lowerCAmelCase = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) _lowerCAmelCase = min(__SCREAMING_SNAKE_CASE , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders _lowerCAmelCase = pre_process_datasets(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , *__SCREAMING_SNAKE_CASE ) _lowerCAmelCase , _lowerCAmelCase = tensor_datasets[0], tensor_datasets[1] _lowerCAmelCase = TensorDataset(*__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = RandomSampler(__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE , batch_size=args.train_batch_size ) _lowerCAmelCase = TensorDataset(*__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = SequentialSampler(__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: _lowerCAmelCase = args.max_steps _lowerCAmelCase = args.max_steps // (len(__SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps) + 1 else: _lowerCAmelCase = len(__SCREAMING_SNAKE_CASE ) // args.gradient_accumulation_steps * args.num_train_epochs _lowerCAmelCase = list(model.named_parameters() ) _lowerCAmelCase = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] _lowerCAmelCase = [ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] _lowerCAmelCase = AdamW(__SCREAMING_SNAKE_CASE , lr=args.learning_rate , eps=args.adam_epsilon ) _lowerCAmelCase = get_linear_schedule_with_warmup( __SCREAMING_SNAKE_CASE , num_warmup_steps=args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE ) if args.do_train: _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , 
desc='Epoch' ): _lowerCAmelCase = 0 _lowerCAmelCase = 0 _lowerCAmelCase = tqdm(__SCREAMING_SNAKE_CASE , desc='Training' ) for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): _lowerCAmelCase = tuple(t.to(__SCREAMING_SNAKE_CASE ) for t in batch ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = batch _lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , mc_token_ids=__SCREAMING_SNAKE_CASE , lm_labels=__SCREAMING_SNAKE_CASE , mc_labels=__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() _lowerCAmelCase = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 _lowerCAmelCase = 'Training loss: {:.2e} lr: {:.2e}'.format(__SCREAMING_SNAKE_CASE , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer _lowerCAmelCase = model.module if hasattr(__SCREAMING_SNAKE_CASE , 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` _lowerCAmelCase = os.path.join(args.output_dir , __SCREAMING_SNAKE_CASE ) _lowerCAmelCase = os.path.join(args.output_dir , __SCREAMING_SNAKE_CASE ) torch.save(model_to_save.state_dict() , __SCREAMING_SNAKE_CASE ) model_to_save.config.to_json_file(__SCREAMING_SNAKE_CASE ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned _lowerCAmelCase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) _lowerCAmelCase = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(__SCREAMING_SNAKE_CASE ) if args.do_eval: model.eval() _lowerCAmelCase , _lowerCAmelCase = 0, 0 _lowerCAmelCase , _lowerCAmelCase = 0, 0 for batch in tqdm(__SCREAMING_SNAKE_CASE , desc='Evaluating' ): _lowerCAmelCase = tuple(t.to(__SCREAMING_SNAKE_CASE ) for t in batch ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = batch with torch.no_grad(): _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = model( __SCREAMING_SNAKE_CASE , mc_token_ids=__SCREAMING_SNAKE_CASE , lm_labels=__SCREAMING_SNAKE_CASE , mc_labels=__SCREAMING_SNAKE_CASE ) _lowerCAmelCase = mc_logits.detach().cpu().numpy() _lowerCAmelCase = mc_labels.to('cpu' ).numpy() _lowerCAmelCase = accuracy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 _lowerCAmelCase = eval_loss / nb_eval_steps _lowerCAmelCase = eval_accuracy / nb_eval_examples _lowerCAmelCase = tr_loss / nb_tr_steps if args.do_train else None _lowerCAmelCase = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} _lowerCAmelCase = os.path.join(args.output_dir , 'eval_results.txt' ) with open(__SCREAMING_SNAKE_CASE , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , __SCREAMING_SNAKE_CASE , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
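The packing built by pre_process_datasets above lays each candidate out as [start] story [delimiter] continuation [classify], with mc_token_ids pointing at the final [classify] token and -100 masking label positions that should not contribute to the LM loss. A toy illustration with made-up token ids:

start_token, delimiter_token, clf_token = 0, 1, 2
story, continuation = [10, 11, 12], [20, 21]

input_ids = [start_token] + story + [delimiter_token] + continuation + [clf_token]
mc_token_id = len(input_ids) - 1        # index of the [classify] token
lm_labels = [-100] * len(input_ids)     # -100 positions are ignored by the LM loss
print(input_ids, mc_token_id)           # [0, 10, 11, 12, 1, 20, 21, 2] 7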
585
0
"""Project Euler problem 99: find the line with the greatest base/exponent value."""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """
    Returns the 1-based line number of the base,exponent pair in `data_file`
    with the greatest numerical value, compared via exponent * log10(base).
    """
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        base, exponent = list(map(int, line.split(',')))
        if exponent * log10(base) > largest:
            largest = exponent * log10(base)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
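Why exponent * log10(base) decides the comparison: logarithms are monotonic, so a**x > b**y exactly when x*log10(a) > y*log10(b), and the million-digit powers never have to be materialized. A small check:

from math import log10

assert (2**11 > 3**7) == (11 * log10(2) > 7 * log10(3))
print(11 * log10(2), 7 * log10(3))  # 3.311... < 3.339..., so 3**7 is larger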
109
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset

from ..utils.generic import ModelOutput


class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed


class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed


class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed


class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator


class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
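Typical use of KeyDataset above: wrap a dict-style dataset so a pipeline sees only the text column. The tiny in-memory list here stands in for a real datasets.Dataset:

rows = [{"text": "I love this.", "label": 1}, {"text": "Meh.", "label": 0}]

dataset = KeyDataset(rows, "text")
print(len(dataset), dataset[0])  # 2 I love this.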
403
0
def add(first: int, second: int) -> int:
    """
    Add two integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        # `carry` holds the overflowing bits; XOR adds without carrying.
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
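A quick trace makes the termination argument concrete: the carry can only move left, so for non-negative inputs it eventually shifts past every set bit. The values below are just an example:

# Tracing add(5, 3): XOR sums without carrying, the carry shifts one bit left per pass.
first, second = 5, 3
while second != 0:
    carry = first & second
    first ^= second
    second = carry << 1
    print(f"first={first:04b}, second={second:04b}")
# Final value of first: 8 (0b1000). Note: with Python's unbounded ints a negative
# `second` would never reach zero, so this sketch assumes non-negative inputs.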
711
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig __A =logging.get_logger(__name__) # General docstring __A ='''RegNetConfig''' # Base docstring __A ='''facebook/regnet-y-040''' __A =[1, 1_0_8_8, 7, 7] # Image classification docstring __A ='''facebook/regnet-y-040''' __A ='''tabby, tabby cat''' __A =[ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , ) -> Dict: super().__init__() lowerCamelCase_ = nn.Convad( lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=kernel_size // 2 , groups=lowercase , bias=lowercase , ) lowerCamelCase_ = nn.BatchNormad(lowercase ) lowerCamelCase_ = ACTaFN[activation] if activation is not None else nn.Identity() def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Optional[Any]: lowerCamelCase_ = self.convolution(lowercase ) lowerCamelCase_ = self.normalization(lowercase ) lowerCamelCase_ = self.activation(lowercase ) return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase ) -> List[Any]: super().__init__() lowerCamelCase_ = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) lowerCamelCase_ = config.num_channels def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]: lowerCamelCase_ = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) lowerCamelCase_ = self.embedder(lowercase ) return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase , lowercase , lowercase = 2 ) -> List[str]: super().__init__() lowerCamelCase_ = nn.Convad(lowercase , lowercase , kernel_size=1 , stride=lowercase , bias=lowercase ) lowerCamelCase_ = nn.BatchNormad(lowercase ) def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Tensor: lowerCamelCase_ = self.convolution(lowercase ) lowerCamelCase_ = self.normalization(lowercase ) return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase , lowercase ) -> List[Any]: super().__init__() lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) ) lowerCamelCase_ = nn.Sequential( nn.Convad(lowercase , lowercase , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase , lowercase , kernel_size=1 ) , nn.Sigmoid() , ) def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Union[str, Any]: # b c h w -> b c 1 1 lowerCamelCase_ = self.pooler(lowercase ) lowerCamelCase_ = self.attention(lowercase ) lowerCamelCase_ = hidden_state * attention return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 ) -> int: super().__init__() lowerCamelCase_ = in_channels != out_channels or stride != 1 lowerCamelCase_ = max(1 , out_channels // config.groups_width ) lowerCamelCase_ = ( RegNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase_ = nn.Sequential( RegNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase , lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act ) , RegNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , ) lowerCamelCase_ = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Dict: lowerCamelCase_ = hidden_state lowerCamelCase_ = self.layer(lowercase ) lowerCamelCase_ = self.shortcut(lowercase ) hidden_state += residual lowerCamelCase_ = self.activation(lowercase ) return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 ) -> Dict: super().__init__() lowerCamelCase_ = in_channels != out_channels or stride != 1 lowerCamelCase_ = max(1 , out_channels // config.groups_width ) lowerCamelCase_ = ( RegNetShortCut(lowercase , lowercase , stride=lowercase ) if should_apply_shortcut else nn.Identity() ) lowerCamelCase_ = nn.Sequential( RegNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase , lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act ) , RegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase , lowercase , kernel_size=1 , activation=lowercase ) , ) lowerCamelCase_ = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[str]: lowerCamelCase_ = hidden_state lowerCamelCase_ = self.layer(lowercase ) lowerCamelCase_ = self.shortcut(lowercase ) hidden_state += residual lowerCamelCase_ = self.activation(lowercase ) return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , ) -> Optional[int]: super().__init__() lowerCamelCase_ = RegNetXLayer if config.layer_type == "x" else RegNetYLayer lowerCamelCase_ = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( 
lowercase , lowercase , lowercase , stride=lowercase , ) , *[layer(lowercase , lowercase , lowercase ) for _ in range(depth - 1 )] , ) def SCREAMING_SNAKE_CASE_( self , lowercase ) -> int: lowerCamelCase_ = self.layers(lowercase ) return hidden_state class _SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , lowercase ) -> int: super().__init__() lowerCamelCase_ = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) lowerCamelCase_ = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowercase , config.depths[1:] ): self.stages.append(RegNetStage(lowercase , lowercase , lowercase , depth=lowercase ) ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = False , lowercase = True ) -> BaseModelOutputWithNoAttention: lowerCamelCase_ = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: lowerCamelCase_ = hidden_states + (hidden_state,) lowerCamelCase_ = stage_module(lowercase ) if output_hidden_states: lowerCamelCase_ = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) class _SCREAMING_SNAKE_CASE ( snake_case_ ): lowerCAmelCase__ = RegNetConfig lowerCAmelCase__ = 'regnet' lowerCAmelCase__ = 'pixel_values' lowerCAmelCase__ = True def SCREAMING_SNAKE_CASE_( self , lowercase ) -> Any: if isinstance(lowercase , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(lowercase , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=False ) -> Any: if isinstance(lowercase , lowercase ): lowerCamelCase_ = value __A =R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __A =R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' 
, snake_case_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class _SCREAMING_SNAKE_CASE ( snake_case_ ): def __init__( self , lowercase ) -> List[str]: super().__init__(lowercase ) lowerCamelCase_ = config lowerCamelCase_ = RegNetEmbeddings(lowercase ) lowerCamelCase_ = RegNetEncoder(lowercase ) lowerCamelCase_ = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = None ) -> BaseModelOutputWithPoolingAndNoAttention: lowerCamelCase_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase_ = self.embedder(lowercase ) lowerCamelCase_ = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase ) lowerCamelCase_ = encoder_outputs[0] lowerCamelCase_ = self.pooler(lowercase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , snake_case_ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class _SCREAMING_SNAKE_CASE ( snake_case_ ): def __init__( self , lowercase ) -> Any: super().__init__(lowercase ) lowerCamelCase_ = config.num_labels lowerCamelCase_ = RegNetModel(lowercase ) # classification head lowerCamelCase_ = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def SCREAMING_SNAKE_CASE_( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ) -> ImageClassifierOutputWithNoAttention: lowerCamelCase_ = return_dict if return_dict is not None else self.config.use_return_dict lowerCamelCase_ = self.regnet(lowercase , output_hidden_states=lowercase , return_dict=lowercase ) lowerCamelCase_ = outputs.pooler_output if return_dict else outputs[1] lowerCamelCase_ = self.classifier(lowercase ) lowerCamelCase_ = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: lowerCamelCase_ = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): lowerCamelCase_ = "single_label_classification" else: lowerCamelCase_ = "multi_label_classification" if self.config.problem_type == "regression": lowerCamelCase_ = MSELoss() if self.num_labels == 1: lowerCamelCase_ = loss_fct(logits.squeeze() , labels.squeeze() ) else: lowerCamelCase_ = loss_fct(lowercase , lowercase ) elif self.config.problem_type == "single_label_classification": lowerCamelCase_ = 
CrossEntropyLoss() lowerCamelCase_ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": lowerCamelCase_ = BCEWithLogitsLoss() lowerCamelCase_ = loss_fct(lowercase , lowercase ) if not return_dict: lowerCamelCase_ = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
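A hedged usage sketch of the classification model defined above; the checkpoint id is taken from the docstring constants in this file, and the image path is a placeholder:

import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("cat.jpg")  # placeholder path
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])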
313
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
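As a sketch (assuming the public transformers API), the backbone mixin aligns out_features and out_indices against the stage names built in __init__:

from transformers import ResNetConfig

config = ResNetConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_indices)  # (2, 4), aligned with out_features (container type may vary by version)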
45
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class UpperCAmelCase : def __init__(self : Optional[Any] , A__ : Optional[Any] , A__ : Optional[int]=sys.maxsize ) -> Optional[Any]: lowercase = "bilinear" lowercase = max_size lowercase = short_edge_length def __call__(self : Union[str, Any] , A__ : Optional[int] ) -> Tuple: lowercase = [] for img in imgs: lowercase , lowercase = img.shape[:2] # later: provide list and randomly choose index for resize lowercase = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowercase = size * 1.0 / min(A__ , A__ ) if h < w: lowercase , lowercase = size, scale * w else: lowercase , lowercase = scale * h, size if max(A__ , A__ ) > self.max_size: lowercase = self.max_size * 1.0 / max(A__ , A__ ) lowercase = newh * scale lowercase = neww * scale lowercase = int(neww + 0.5 ) lowercase = int(newh + 0.5 ) if img.dtype == np.uinta: lowercase = Image.fromarray(A__ ) lowercase = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowercase = np.asarray(A__ ) else: lowercase = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowercase = nn.functional.interpolate( A__ , (newh, neww) , mode=self.interp_method , align_corners=A__ ).squeeze(0 ) img_augs.append(A__ ) return img_augs class UpperCAmelCase : def __init__(self : Union[str, Any] , A__ : List[Any] ) -> Optional[int]: lowercase = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowercase = cfg.INPUT.FORMAT lowercase = cfg.SIZE_DIVISIBILITY lowercase = cfg.PAD_VALUE lowercase = cfg.INPUT.MAX_SIZE_TEST lowercase = cfg.MODEL.DEVICE lowercase = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowercase = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowercase = lambda A__ : (x - self.pixel_mean) / self.pixel_std def UpperCAmelCase__ (self : List[Any] , A__ : Any ) -> int: lowercase = tuple(max(A__ ) for s in zip(*[img.shape for img in images] ) ) lowercase = [im.shape[-2:] for im in images] lowercase = [ nn.functional.pad( A__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(A__ , A__ ) ] return torch.stack(A__ ), torch.tensor(A__ ) def __call__(self : Optional[int] , A__ : Union[str, Any] , A__ : Optional[Any]=False ) -> str: with torch.no_grad(): if not isinstance(A__ , A__ ): lowercase = [images] if single_image: assert len(A__ ) == 1 for i in range(len(A__ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(A__ , images.pop(A__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( A__ , torch.as_tensor(img_tensorize(images.pop(A__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowercase = torch.tensor([im.shape[:2] for im in images] ) lowercase = self.aug(A__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowercase = [self.normalizer(A__ ) for x in images] # now pad them to do the following operations lowercase , lowercase = self.pad(A__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad 
lowercase = torch.true_divide(A__ , A__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def UpperCAmelCase_ ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" assert torch.isfinite(lowerCAmelCase_ ).all(), "Box tensor contains infinite or NaN!" lowercase , lowercase = box_size tensor[:, 0].clamp_(min=0 , max=lowerCAmelCase_ ) tensor[:, 1].clamp_(min=0 , max=lowerCAmelCase_ ) tensor[:, 2].clamp_(min=0 , max=lowerCAmelCase_ ) tensor[:, 3].clamp_(min=0 , max=lowerCAmelCase_ )
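The shortest-edge scaling above is easiest to verify with concrete numbers; this standalone sketch reproduces only the size arithmetic, with illustrative inputs:

def resized_shape(h, w, size, max_size):
    # Scale so the short edge equals `size`, then cap the long edge at `max_size`.
    scale = size * 1.0 / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:
        rescale = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

print(resized_shape(480, 640, 600, 1000))   # (600, 800)
print(resized_shape(480, 1920, 600, 1000))  # (250, 1000), capped by max_size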
310
0
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCAmelCase__ = logging.get_logger(__name__) UpperCAmelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED UpperCAmelCase__ = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } UpperCAmelCase__ = { "allenai/led-base-16384": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def A ( ) -> Dict: '''simple docstring''' _UpperCAmelCase = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _UpperCAmelCase = bs[:] _UpperCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(_UpperCAmelCase ) cs.append(2**8 + n ) n += 1 _UpperCAmelCase = [chr(_UpperCAmelCase ) for n in cs] return dict(zip(_UpperCAmelCase , _UpperCAmelCase ) ) def A ( _UpperCAmelCase : Union[str, Any] ) -> Dict: '''simple docstring''' _UpperCAmelCase = set() _UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _UpperCAmelCase = char return pairs class __lowerCAmelCase ( A ): UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] , A : int , A : Any , A : Tuple="replace" , A : Any="<s>" , A : List[Any]="</s>" , A : int="</s>" , A : List[Any]="<s>" , A : Any="<unk>" , A : Tuple="<pad>" , A : Optional[Any]="<mask>" , A : str=False , **A : Union[str, Any] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else bos_token _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else eos_token _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else sep_token _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else cls_token _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else unk_token _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token super().__init__( errors=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , add_prefix_space=A , **A , ) with open(A , encoding='utf-8') as vocab_handle: _UpperCAmelCase = json.load(A) _UpperCAmelCase = {v: k for k, v in self.encoder.items()} _UpperCAmelCase = errors # how to handle errors in decoding _UpperCAmelCase = bytes_to_unicode() _UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(A , encoding='utf-8') as merges_handle: _UpperCAmelCase = merges_handle.read().split('\n')[1:-1] _UpperCAmelCase = [tuple(merge.split()) for merge in bpe_merges] _UpperCAmelCase = dict(zip(A , range(len(A)))) _UpperCAmelCase = {} _UpperCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _UpperCAmelCase = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+') @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _lowerCamelCase ( self : str) -> Optional[Any]: """simple docstring""" return len(self.encoder) def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def _lowerCamelCase ( self : Optional[Any] , A : Dict) -> List[Any]: """simple docstring""" if token in self.cache: return self.cache[token] _UpperCAmelCase = tuple(A) _UpperCAmelCase = get_pairs(A) if not pairs: return token while True: _UpperCAmelCase = min(A , key=lambda A: self.bpe_ranks.get(A , float('inf'))) if bigram not in self.bpe_ranks: break _UpperCAmelCase , _UpperCAmelCase = bigram _UpperCAmelCase = [] _UpperCAmelCase = 0 while i < len(A): try: _UpperCAmelCase = word.index(A , A) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) _UpperCAmelCase = j if word[i] == first and i < len(A) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 _UpperCAmelCase = tuple(A) _UpperCAmelCase = new_word if len(A) == 1: break else: _UpperCAmelCase = get_pairs(A) _UpperCAmelCase = ' '.join(A) _UpperCAmelCase = word return word def _lowerCamelCase ( self : Optional[int] , A : Optional[int]) -> int: """simple docstring""" _UpperCAmelCase = [] for token in re.findall(self.pat , A): _UpperCAmelCase = ''.join( self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A).split(' ')) return bpe_tokens def _lowerCamelCase ( self : Any , A : Optional[Any]) -> Union[str, Any]: """simple docstring""" return self.encoder.get(A , self.encoder.get(self.unk_token)) def _lowerCamelCase ( self : str , A : int) -> Union[str, Any]: """simple docstring""" return self.decoder.get(A) def _lowerCamelCase ( self : List[Any] , A : int) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = ''.join(A) _UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors) return text def _lowerCamelCase ( self : Any , A : str , A : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return _UpperCAmelCase = os.path.join( A , (filename_prefix + '-' if filename_prefix else '') + 
VOCAB_FILES_NAMES['vocab_file']) _UpperCAmelCase = os.path.join( A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(A , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A , ensure_ascii=A) + '\n') _UpperCAmelCase = 0 with open(A , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A: kv[1]): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ' Please check that the tokenizer is not corrupted!') _UpperCAmelCase = token_index writer.write(' '.join(A) + '\n') index += 1 return vocab_file, merge_file def _lowerCamelCase ( self : Dict , A : List[int] , A : Optional[List[int]] = None) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] _UpperCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self : str , A : List[int] , A : Optional[List[int]] = None , A : bool = False) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A) if token_ids_a is None: return [1] + ([0] * len(A)) + [1] return [1] + ([0] * len(A)) + [1, 1] + ([0] * len(A)) + [1] def _lowerCamelCase ( self : List[str] , A : List[int] , A : Optional[List[int]] = None) -> List[int]: """simple docstring""" _UpperCAmelCase = [self.sep_token_id] _UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def _lowerCamelCase ( self : Any , A : Optional[int] , A : str=False , **A : List[Any]) -> Dict: """simple docstring""" _UpperCAmelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(A) > 0 and not text[0].isspace()): _UpperCAmelCase = ' ' + text return (text, kwargs) def _lowerCamelCase ( self : List[Any] , A : Union[Dict[str, EncodedInput], BatchEncoding] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ) -> dict: """simple docstring""" _UpperCAmelCase = super()._pad( encoded_inputs=A , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , ) # Load from model defaults if return_attention_mask is None: _UpperCAmelCase = 'attention_mask' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _UpperCAmelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. _UpperCAmelCase = len(encoded_inputs['global_attention_mask']) != len(A) if needs_to_be_padded: _UpperCAmelCase = len(A) - len(encoded_inputs['global_attention_mask']) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _UpperCAmelCase = ( encoded_inputs['global_attention_mask'] + [-1] * difference ) elif self.padding_side == "left": _UpperCAmelCase = [-1] * difference + encoded_inputs[ 'global_attention_mask' ] else: raise ValueError('Invalid padding strategy:' + str(self.padding_side)) return encoded_inputs
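To illustrate the _pad override at the end, which pads global_attention_mask with -1 because 0 already means local attention, a short sketch using the public tokenizer:

from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["short", "a slightly longer input"])
# Global attention on the first token of each (still unpadded) sequence:
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
# tokenizer.pad routes through the `_pad` override above, extending the mask
# with -1 so padding stays distinguishable from local attention (0).
batch = tokenizer.pad(enc, padding=True)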
639
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __lowerCAmelCase : @staticmethod def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]: """simple docstring""" pass @is_pipeline_test @require_vision class __lowerCAmelCase ( unittest.TestCase ): @require_torch def _lowerCamelCase ( self : List[str]) -> Tuple: """simple docstring""" _UpperCAmelCase = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , ) _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') _UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c']) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(A) , [ [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}], [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}], ] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2) self.assertEqual( nested_simplify(A) , [ [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], ] , ) @require_tf def _lowerCamelCase ( self : str) -> Tuple: """simple docstring""" _UpperCAmelCase = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf') _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') _UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c']) self.assertEqual( nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2) self.assertEqual( nested_simplify(A) , [ [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], [ {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, {'score': 0.3_3_3, 'label': ANY(A)}, ], ] , ) @slow @require_torch def _lowerCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , ) # This is an image of 2 cats with remotes and no planes _UpperCAmelCase = 
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') _UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote']) self.assertEqual( nested_simplify(A) , [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2) self.assertEqual( nested_simplify(A) , [ [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ], ] * 5 , ) @slow @require_tf def _lowerCamelCase ( self : List[str]) -> Any: """simple docstring""" _UpperCAmelCase = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf') # This is an image of 2 cats with remotes and no planes _UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') _UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote']) self.assertEqual( nested_simplify(A) , [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ] , ) _UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2) self.assertEqual( nested_simplify(A) , [ [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ], ] * 5 , )
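The pipeline exercised by these tests can be used directly; a sketch, with the image path as a placeholder:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier("cats.jpg", candidate_labels=["cat", "plane", "remote"])
print(result)  # [{"score": ..., "label": ...}, ...] sorted by score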
639
1
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class SCREAMING_SNAKE_CASE__ : def __init__( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] ): if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""" ) lowerCAmelCase = img lowerCAmelCase = img.shape[1] lowerCAmelCase = img.shape[0] lowerCAmelCase = dst_width lowerCAmelCase = dst_height lowerCAmelCase = self.src_w / self.dst_w lowerCAmelCase = self.src_h / self.dst_h lowerCAmelCase = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255 ) def __lowercase ( self : List[Any] ): for i in range(self.dst_h ): for j in range(self.dst_w ): lowerCAmelCase = self.img[self.get_y(__lowercase )][self.get_x(__lowercase )] def __lowercase ( self : int , lowerCAmelCase : str ): return int(self.ratio_x * x ) def __lowercase ( self : str , lowerCAmelCase : str ): return int(self.ratio_y * y ) if __name__ == "__main__": a = 8_0_0, 6_0_0 a = imread('image_data/lena.jpg', 1) a = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output ) waitKey(0) destroyAllWindows()
169
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
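A sketch instantiating a smaller, randomly initialized DeiT from this config; the sizes are illustrative:

from transformers import DeiTConfig, DeiTModel

config = DeiTConfig(hidden_size=384, num_hidden_layers=6, num_attention_heads=6, intermediate_size=1536)
model = DeiTModel(config)  # random weights; from_pretrained would load a checkpoint
print(config.patch_size)   # 16 by default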
365
0
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase : List[str] = logging.get_logger(__name__) __UpperCamelCase : int = [ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def snake_case_ ( __lowercase ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: UpperCAmelCase_ : Union[str, Any] = k.replace(__lowercase , __lowercase ) if k.startswith('''encoder''' ): UpperCAmelCase_ : str = k.replace('''.attn''' , '''.self_attn''' ) UpperCAmelCase_ : int = k.replace('''norm1''' , '''self_attn_layer_norm''' ) UpperCAmelCase_ : Dict = k.replace('''norm2''' , '''final_layer_norm''' ) elif k.startswith('''decoder''' ): UpperCAmelCase_ : Any = k.replace('''norm1''' , '''self_attn_layer_norm''' ) UpperCAmelCase_ : Optional[Any] = k.replace('''norm2''' , '''encoder_attn_layer_norm''' ) UpperCAmelCase_ : List[str] = k.replace('''norm3''' , '''final_layer_norm''' ) return k def snake_case_ ( __lowercase ): UpperCAmelCase_ : Union[str, Any] = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: UpperCAmelCase_ : Tuple = sd.pop(__lowercase ) UpperCAmelCase_ : Union[str, Any] = k.replace('''layernorm_embedding''' , '''layer_norm''' ) assert new_k not in sd UpperCAmelCase_ : Union[str, Any] = v __UpperCamelCase : Union[str, Any] = ['START'] @torch.no_grad() def snake_case_ ( __lowercase , __lowercase , __lowercase ): UpperCAmelCase_ : Dict = torch.load(__lowercase , map_location='''cpu''' ) UpperCAmelCase_ : Any = model['''model'''] UpperCAmelCase_ : Any = BlenderbotConfig.from_json_file(__lowercase ) UpperCAmelCase_ : Optional[int] = BlenderbotForConditionalGeneration(__lowercase ) UpperCAmelCase_ : List[str] = m.model.state_dict().keys() UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Any = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue UpperCAmelCase_ : str = rename_state_dict_key(__lowercase ) if new_k not in valid_keys: failures.append([k, new_k] ) else: UpperCAmelCase_ : Optional[int] = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__lowercase ) m.model.load_state_dict(__lowercase , strict=__lowercase ) m.half() m.save_pretrained(__lowercase ) if __name__ == "__main__": __UpperCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument( '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use' ) __UpperCamelCase : Optional[int] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
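Tracing the key renaming on one illustrative ParlAI key shows the two passes at work:

k = "encoder.layers.0.attention.q_lin.weight"
k = k.replace("attention", "attn").replace("q_lin", "q_proj")  # PATTERNS pass
k = k.replace(".attn", ".self_attn")                           # encoder-specific pass
print(k)  # encoder.layers.0.self_attn.q_proj.weight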
641
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
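A sketch of the derived properties, with tiny sizes chosen for illustration:

from transformers import FalconConfig

config = FalconConfig(hidden_size=512, num_attention_heads=8, num_hidden_layers=4)
print(config.head_dim)  # 64 == hidden_size // num_attention_heads
print(config.rotary)    # True, since alibi defaults to False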
641
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
614
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 UpperCamelCase__ : str = get_tests_dir('fixtures') class _lowercase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' UpperCAmelCase__ : Optional[int] = mock.Mock() UpperCAmelCase__ : Dict = 500 UpperCAmelCase__ : Optional[int] = {} UpperCAmelCase__ : List[str] = HTTPError UpperCAmelCase__ : Tuple = {} # Download this model to make sure it's in the cache. UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' ,return_value=lowerCamelCase_ ) as mock_head: UpperCAmelCase__ : Tuple = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase__ : Dict = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' with self.assertRaises(lowerCamelCase_ ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase__ : int = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) UpperCAmelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' ,subfolder='''feature_extractor''' ) self.assertIsNotNone(lowerCamelCase_ ) @is_staging_test class _lowercase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowerCAmelCase__ ( cls ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : Optional[int] = TOKEN HfFolder.save_token(lowerCamelCase_ ) @classmethod def lowerCAmelCase__ ( cls ) -> List[str]: '''simple docstring''' try: delete_repo(token=cls._token ,repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = ViTImageProcessor.from_pretrained(lowerCamelCase_ ) image_processor.push_to_hub('''test-image-processor''' ,use_auth_token=self._token ) UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token ,repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( lowerCamelCase_ ,repo_id='''test-image-processor''' ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase__ : List[Any] = 
ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) ) def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = ViTImageProcessor.from_pretrained(lowerCamelCase_ ) image_processor.push_to_hub('''valid_org/test-image-processor''' ,use_auth_token=self._token ) UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) ) # Reset repo delete_repo(token=self._token ,repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( lowerCamelCase_ ,repo_id='''valid_org/test-image-processor-org''' ,push_to_hub=lowerCamelCase_ ,use_auth_token=self._token ) UpperCAmelCase__ : Any = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(lowerCamelCase_ ,getattr(lowerCamelCase_ ,lowerCamelCase_ ) ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' CustomImageProcessor.register_for_auto_class() UpperCAmelCase__ : List[str] = CustomImageProcessor.from_pretrained(lowerCamelCase_ ) image_processor.push_to_hub('''test-dynamic-image-processor''' ,use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map ,{'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} ,) UpperCAmelCase__ : Any = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' ,trust_remote_code=lowerCamelCase_ ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ ,'''CustomImageProcessor''' )
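The round-trip these tests assert, as a user-facing sketch; the repo id and token are placeholders:

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("my-user/my-image-processor", use_auth_token="hf_...")  # placeholder token
reloaded = ViTImageProcessor.from_pretrained("my-user/my-image-processor")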
614
1
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __lowerCAmelCase : Tuple ="""base_with_context""" def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]: '''simple docstring''' lowercase = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) ) lowercase = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=lowerCAmelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): lowercase = weights[f'layers_{lyr_num}'] lowercase = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) lowercase = ly_weight["""attention"""] lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :str ) -> Any: '''simple docstring''' lowercase = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) ) lowercase = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=lowerCAmelCase__ ) for lyr_num, lyr in enumerate(model.encoders ): lowercase = weights[f'layers_{lyr_num}'] lowercase = ly_weight["""attention"""] lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) lowercase = nn.Parameter( torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) lowercase = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) ) return model def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :str ) -> Optional[Any]: '''simple docstring''' lowercase = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) ) lowercase = 
nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) ) lowercase = nn.Parameter( torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=lowerCAmelCase__ ) lowercase = nn.Parameter( torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) ) for lyr_num, lyr in enumerate(model.decoders ): lowercase = weights[f'layers_{lyr_num}'] lowercase = nn.Parameter( torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) ) lowercase = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) lowercase = ly_weight["""self_attention"""] lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) lowercase = ly_weight["""MultiHeadDotProductAttention_0"""] lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) ) lowercase = nn.Parameter( torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) ) lowercase = nn.Parameter( torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) ) lowercase = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) ) lowercase = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) ) return model def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]: '''simple docstring''' lowercase = checkpoints.load_tax_checkpoint(args.checkpoint_path ) lowercase = jnp.tree_util.tree_map(onp.array , lowerCAmelCase__ ) lowercase = [ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] lowercase = os.path.join(args.checkpoint_path , """..""" , """config.gin""" ) lowercase = inference.parse_training_gin_file(lowerCAmelCase__ , lowerCAmelCase__ ) lowercase = inference.InferenceModel(args.checkpoint_path , lowerCAmelCase__ ) lowercase = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" , variance_type="""fixed_large""" ) lowercase = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["""inputs"""] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , 
d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) lowercase = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["""targets_context"""] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="""gated-gelu""" , ) lowercase = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["""targets_context"""] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) lowercase = load_notes_encoder(ta_checkpoint["""target"""]["""token_encoder"""] , lowerCAmelCase__ ) lowercase = load_continuous_encoder(ta_checkpoint["""target"""]["""continuous_encoder"""] , lowerCAmelCase__ ) lowercase = load_decoder(ta_checkpoint["""target"""]["""decoder"""] , lowerCAmelCase__ ) lowercase = OnnxRuntimeModel.from_pretrained("""kashif/soundstream_mel_decoder""" ) lowercase = SpectrogramDiffusionPipeline( notes_encoder=lowerCAmelCase__ , continuous_encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , melgan=lowerCAmelCase__ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help="""Path to the original jax model checkpoint.""", ) __lowerCAmelCase : int =parser.parse_args() main(args)
197
"""simple docstring""" import absl # noqa: F401 # Here to have a nice missing dependency error message early on import nltk # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import six # noqa: F401 # Here to have a nice missing dependency error message early on from rouge_score import rouge_scorer, scoring import datasets __lowerCAmelCase : str ="""\ @inproceedings{lin-2004-rouge, title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\", author = \"Lin, Chin-Yew\", booktitle = \"Text Summarization Branches Out\", month = jul, year = \"2004\", address = \"Barcelona, Spain\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W04-1013\", pages = \"74--81\", } """ __lowerCAmelCase : str ="""\ ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for evaluating automatic summarization and machine translation software in natural language processing. The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation. Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters. This metrics is a wrapper around Google Research reimplementation of ROUGE: https://github.com/google-research/google-research/tree/master/rouge """ __lowerCAmelCase : Optional[int] =""" Calculates average rouge scores for a list of hypotheses and references Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. rouge_types: A list of rouge types to calculate. Valid names: `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring, `\"rougeL\"`: Longest common subsequence based scoring. `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`. See details in https://github.com/huggingface/datasets/issues/617 use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes. 
use_aggregator: Return aggregates if this is set to True Returns: rouge1: rouge_1 (precision, recall, f1), rouge2: rouge_2 (precision, recall, f1), rougeL: rouge_l (precision, recall, f1), rougeLsum: rouge_lsum (precision, recall, f1) Examples: >>> rouge = datasets.load_metric('rouge') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> results = rouge.compute(predictions=predictions, references=references) >>> print(list(results.keys())) ['rouge1', 'rouge2', 'rougeL', 'rougeLsum'] >>> print(results[\"rouge1\"]) AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0)) >>> print(results[\"rouge1\"].mid.fmeasure) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def A__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[ """https://en.wikipedia.org/wiki/ROUGE_(metric)""", """https://github.com/google-research/google-research/tree/master/rouge""", ] , ) def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=False ): """simple docstring""" if rouge_types is None: lowercase = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""] lowercase = rouge_scorer.RougeScorer(rouge_types=__lowerCAmelCase , use_stemmer=__lowerCAmelCase ) if use_aggregator: lowercase = scoring.BootstrapAggregator() else: lowercase = [] for ref, pred in zip(__lowerCAmelCase , __lowerCAmelCase ): lowercase = scorer.score(__lowerCAmelCase , __lowerCAmelCase ) if use_aggregator: aggregator.add_scores(__lowerCAmelCase ) else: scores.append(__lowerCAmelCase ) if use_aggregator: lowercase = aggregator.aggregate() else: lowercase = {} for key in scores[0]: lowercase = [score[key] for score in scores] return result
197
1
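The spectrogram-diffusion conversion script in this row repeatedly wraps transposed Flax kernels in nn.Parameter. The reason is a layout mismatch: a Flax Dense kernel is stored as (in_features, out_features), while torch.nn.Linear.weight is stored as (out_features, in_features). A minimal sketch of that round trip, with invented shapes (nothing here is read from a real checkpoint):

import numpy as np
import torch
import torch.nn as nn

# Hypothetical Flax-style dense kernel: shape (in_features, out_features).
flax_kernel = np.random.randn(16, 32).astype(np.float32)

# torch.nn.Linear stores weight as (out_features, in_features),
# hence the .T applied to every "kernel" during conversion.
linear = nn.Linear(16, 32, bias=False)
linear.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))

x = np.random.randn(4, 16).astype(np.float32)
out_flax = x @ flax_kernel                                # Flax computes x @ kernel
out_torch = linear(torch.from_numpy(x)).detach().numpy()  # torch computes x @ weight.T
assert np.allclose(out_flax, out_torch, atol=1e-5)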
from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __magic_name__ (__lowercase ): def __init__( self , _a , _a=None , _a=None , _a=0 ) -> List[Any]: lowerCAmelCase_ = 1.0 if scale is None else scale lowerCAmelCase_ = 0.0 if loc is None else loc super().__init__(_a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_a )] ) @property def __a ( self ) -> str: return self.base_dist.mean * self.scale + self.loc @property def __a ( self ) -> Tuple: return self.base_dist.variance * self.scale**2 @property def __a ( self ) -> Any: return self.variance.sqrt() class __magic_name__ (nn.Module ): def __init__( self , _a , _a , _a , **_a ) -> None: super().__init__(**_a ) lowerCAmelCase_ = args_dim lowerCAmelCase_ = nn.ModuleList([nn.Linear(_a , _a ) for dim in args_dim.values()] ) lowerCAmelCase_ = domain_map def __a ( self , _a ) -> Tuple[torch.Tensor]: lowerCAmelCase_ = [proj(_a ) for proj in self.proj] return self.domain_map(*_a ) class __magic_name__ (nn.Module ): def __init__( self , _a ) -> Union[str, Any]: super().__init__() lowerCAmelCase_ = function def __a ( self , _a , *_a ) -> Optional[Any]: return self.function(_a , *_a ) class __magic_name__ : lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 42 def __init__( self , _a = 1 ) -> None: lowerCAmelCase_ = dim lowerCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim} def __a ( self , _a ) -> Union[str, Any]: if self.dim == 1: return self.distribution_class(*_a ) else: return Independent(self.distribution_class(*_a ) , 1 ) def __a ( self , _a , _a = None , _a = None , ) -> Distribution: lowerCAmelCase_ = self._base_distribution(_a ) if loc is None and scale is None: return distr else: return AffineTransformed(_a , loc=_a , scale=_a , event_dim=self.event_dim ) @property def __a ( self ) -> Tuple: return () if self.dim == 1 else (self.dim,) @property def __a ( self ) -> int: return len(self.event_shape ) @property def __a ( self ) -> float: return 0.0 def __a ( self , _a ) -> nn.Module: return ParameterProjection( in_features=_a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def __a ( self , *_a ) -> str: raise NotImplementedError() @staticmethod def __a ( _a ) -> torch.Tensor: return (x + torch.sqrt(torch.square(_a ) + 4.0 )) / 2.0 class __magic_name__ (__lowercase ): lowerCamelCase__ = {"df": 1, "loc": 1, "scale": 1} lowerCamelCase__ = StudentT @classmethod def __a ( cls , _a , _a , _a ) -> Dict: lowerCAmelCase_ = cls.squareplus(_a ).clamp_min(torch.finfo(scale.dtype ).eps ) lowerCAmelCase_ = 2.0 + cls.squareplus(_a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __magic_name__ (__lowercase ): lowerCamelCase__ = {"loc": 1, "scale": 1} lowerCamelCase__ = Normal @classmethod def __a ( cls , _a , _a ) -> Optional[Any]: lowerCAmelCase_ = cls.squareplus(_a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __magic_name__ (__lowercase ): lowerCamelCase__ = {"total_count": 1, "logits": 1} lowerCamelCase__ = NegativeBinomial @classmethod def __a ( cls , _a , _a ) -> int: lowerCAmelCase_ = cls.squareplus(_a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def __a ( self , _a ) -> Distribution: lowerCAmelCase_ , lowerCAmelCase_ = distr_args if self.dim == 1: return self.distribution_class(total_count=_a , logits=_a ) else: return 
Independent(self.distribution_class(total_count=_a , logits=_a ) , 1 ) def __a ( self , _a , _a = None , _a = None ) -> Distribution: lowerCAmelCase_ , lowerCAmelCase_ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
122
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase__ = {'''configuration_sew''': ['''SEW_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SEWConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SEWForCTC''', '''SEWForSequenceClassification''', '''SEWModel''', '''SEWPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
122
1
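The distribution-output row above maps unconstrained network outputs to positive parameters with squareplus, (x + sqrt(x^2 + 4)) / 2, before clamping to the dtype epsilon. A small self-contained check of the two properties the code relies on, strict positivity everywhere and squareplus(0) = 1 (pure PyTorch, nothing assumed beyond the formula itself):

import torch

def squareplus(x: torch.Tensor) -> torch.Tensor:
    # Smooth map from R to (0, inf); used above for scale / df / total_count params.
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0

x = torch.linspace(-10.0, 10.0, steps=9)
y = squareplus(x)
assert torch.all(y > 0)                                           # strictly positive everywhere
assert torch.allclose(squareplus(torch.zeros(1)), torch.ones(1))  # squareplus(0) == 1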
'''simple docstring''' import math def res(x, y): '''simple docstring''' if 0 not in (x, y): # We use the relation log10(x^y) = y * log10(x), where 10 is the base. return y * math.log10(x) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('''This should never happen''') if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. prompt = '''Enter the base and the power separated by a comma: ''' x1, y1 = map(int, input(prompt).split(''',''')) x2, y2 = map(int, input(prompt).split(''',''')) # We find the log of each number, using the function res(), which takes two # arguments. res1 = res(x1, y1) res2 = res(x2, y2) # We check for the largest number if res1 > res2: print('''Largest number is''', x1, '''^''', y1) elif res2 > res1: print('''Largest number is''', x2, '''^''', y2) else: print('''Both are equal''')
686
'''simple docstring''' from jiwer import compute_measures import datasets lowerCamelCase :int = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' lowerCamelCase :int = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' lowerCamelCase :Optional[Any] = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): def _a (self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", ] , ) def _a (self , lowercase=None , lowercase=None , lowercase=False ): if concatenate_texts: return compute_measures(lowercase , lowercase )["wer"] else: A_ : List[Any] = 0 A_ : Optional[int] = 0 for prediction, reference in zip(lowercase , lowercase ): A_ : Any = compute_measures(lowercase , lowercase ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
686
1
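The WER metric in this row reduces to (S + D + I) / N over a word-level alignment. A hand-rolled sketch of the same computation using only the Levenshtein recurrence (no jiwer dependency; the sentences are invented and the reference is assumed non-empty):

def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # Word-level Levenshtein distance via dynamic programming.
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i
    for j in range(len(hyp) + 1):
        d[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution / hit
    return d[len(ref)][len(hyp)] / len(ref)

# (S + D + I) / N: one substitution over four reference words -> 0.25
assert word_error_rate("this is the reference", "this is the prediction") == 0.25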
def SCREAMING_SNAKE_CASE_ ( number: int ) -> bool: if number < 0: raise ValueError('''number must not be negative''' ) # A power of two has exactly one set bit, so n & (n - 1) clears it to zero. return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
312
lowercase__ : Union[str, Any] = '''0.21.0''' from .accelerator import Accelerator from .big_modeling import ( cpu_offload, cpu_offload_with_hook, disk_offload, dispatch_model, init_empty_weights, init_on_device, load_checkpoint_and_dispatch, ) from .data_loader import skip_first_batches from .launchers import debug_launcher, notebook_launcher from .state import PartialState from .utils import ( DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, FullyShardedDataParallelPlugin, GradScalerKwargs, InitProcessGroupKwargs, find_executable_batch_size, infer_auto_device_map, is_rich_available, load_checkpoint_in_model, synchronize_rng_states, ) if is_rich_available(): from .utils import rich
312
1
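The bit test above works because a power of two has exactly one set bit, so subtracting 1 flips that bit and sets all lower ones, leaving no overlap with the original value. A quick demonstration (note that 0 also passes the raw bit test, so callers that do not want 0 treated as a power of two must exclude it themselves):

def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0

powers = [n for n in range(1, 65) if is_power_of_two(n)]
assert powers == [1, 2, 4, 8, 16, 32, 64]
assert is_power_of_two(0)  # edge case: the bit trick alone treats 0 as a power of two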
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer _A = logging.get_logger(__name__) _A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart _A = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } _A = { 'facebook/bart-base': 1024, 'facebook/bart-large': 1024, 'facebook/bart-large-mnli': 1024, 'facebook/bart-large-cnn': 1024, 'facebook/bart-large-xsum': 1024, 'yjernite/bart_eli5': 1024, } class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): _lowerCamelCase : List[str] = VOCAB_FILES_NAMES _lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase : Union[str, Any] = ["""input_ids""", """attention_mask"""] _lowerCamelCase : Optional[int] = BartTokenizer def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ): super().__init__( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , 
sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) a_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , _SCREAMING_SNAKE_CASE ) != add_prefix_space: a_ = getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) ) a_ = add_prefix_space a_ = pre_tok_class(**_SCREAMING_SNAKE_CASE ) a_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` a_ = """post_processor""" a_ = getattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if tokenizer_component_instance: a_ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a_ = tuple(state["""sep"""] ) if "cls" in state: a_ = tuple(state["""cls"""] ) a_ = False if state.get("""add_prefix_space""" , _SCREAMING_SNAKE_CASE ) != add_prefix_space: a_ = add_prefix_space a_ = True if state.get("""trim_offsets""" , _SCREAMING_SNAKE_CASE ) != trim_offsets: a_ = trim_offsets a_ = True if changes_to_apply: a_ = getattr(_SCREAMING_SNAKE_CASE , state.pop("""type""" ) ) a_ = component_class(**_SCREAMING_SNAKE_CASE ) setattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @property def __magic_name__ ( self ): if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def __magic_name__ ( self , _SCREAMING_SNAKE_CASE ): a_ = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else value a_ = value def __magic_name__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): a_ = kwargs.get("""is_split_into_words""" , _SCREAMING_SNAKE_CASE ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __magic_name__ ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): a_ = kwargs.get("""is_split_into_words""" , _SCREAMING_SNAKE_CASE ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): a_ = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE ) return tuple(_SCREAMING_SNAKE_CASE ) def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): a_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __magic_name__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): a_ = [self.sep_token_id] a_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
403
def solution( max_perimeter: int = 10**9 ) -> int: """simple docstring""" prev_value = 1 value = 2 perimeters_sum = 0 i = 0 perimeter = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(f'{solution() = }')
403
1
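The solution in this row sums perimeters of "almost equilateral" triangles (sides a, a, a ± 1) with integral area via a Pell-style recurrence. A brute-force cross-check for small limits, written from the problem statement rather than from the recurrence; the integer Heron identity 16 * area^2 = p * (p - 2a)^2 * (p - 2b) avoids floating point:

import math

def brute_force(max_perimeter: int) -> int:
    # Enumerate triangles (a, a, b) with b = a - 1 or a + 1 and test for
    # integral area using Heron's formula kept in integer arithmetic.
    total = 0
    a = 2
    while 3 * a - 1 <= max_perimeter:
        for b in (a - 1, a + 1):
            p = 2 * a + b
            if p > max_perimeter:
                continue
            sixteen_area_sq = p * (p - 2 * a) * (p - 2 * a) * (p - 2 * b)
            if sixteen_area_sq > 0:
                root = math.isqrt(sixteen_area_sq)
                # area is an integer iff the root is exact and divisible by 4
                if root * root == sixteen_area_sq and root % 4 == 0:
                    total += p
        a += 1
    return total

# Smallest qualifying triangle is (5, 5, 6): area 12, perimeter 16.
assert brute_force(20) == 16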
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowercase ( __lowerCamelCase , unittest.TestCase ): snake_case_ = KandinskyVaaControlnetPipeline snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = ["""image_embeds""", """negative_image_embeds""", """hint"""] snake_case_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] snake_case_ = False @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return 32 @property def __lowercase ( self : int ): '''simple docstring''' return 32 @property def __lowercase ( self : Dict ): '''simple docstring''' return self.time_input_dim @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __lowercase ( self : Any ): '''simple docstring''' return 100 @property def __lowercase ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase__ : int = UNetaDConditionModel(**A ) return model @property def __lowercase ( self : Union[str, Any] ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __lowercase ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : str = VQModel(**self.dummy_movq_kwargs ) return model def __lowercase ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.dummy_unet UpperCAmelCase__ : List[Any] = self.dummy_movq UpperCAmelCase__ : List[Any] = DDIMScheduler( num_train_timesteps=1_000 ,beta_schedule="""linear""" ,beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,clip_sample=A ,set_alpha_to_one=A ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=A ,) UpperCAmelCase__ : Optional[Any] = { """unet""": unet, 
"""scheduler""": scheduler, """movq""": movq, } return components def __lowercase ( self : str ,A : Optional[Any] ,A : Any=0 ): '''simple docstring''' UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(A ) ).to(A ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( A ) # create hint UpperCAmelCase__ : int = floats_tensor((1, 3, 64, 64) ,rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase__ : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase__ : Dict = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase__ : Dict = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __lowercase ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = """cpu""" UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Union[str, Any] = self.pipeline_class(**A ) UpperCAmelCase__ : Optional[int] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(A ) ) UpperCAmelCase__ : Tuple = output.images UpperCAmelCase__ : Dict = pipe( **self.get_dummy_inputs(A ) ,return_dict=A ,)[0] UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Optional[int] = np.array( [0.6_9_5_9_8_2_6, 0.8_6_8_2_7_9, 0.7_5_5_8_0_9_2, 0.6_8_7_6_9_4_6_7, 0.8_5_8_0_5_8_0_4, 0.6_5_9_7_7_4_9_6, 0.4_4_8_8_5_3_0_2, 0.5_9_5_9_1_1_1, 0.4_2_5_1_5_9_5] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def __lowercase ( self : Union[str, Any] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) UpperCAmelCase__ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) UpperCAmelCase__ : int = torch.from_numpy(np.array(A ) ).float() / 2_5_5.0 UpperCAmelCase__ : Union[str, Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa ) pipe_prior.to(A ) UpperCAmelCase__ : List[Any] = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa ) UpperCAmelCase__ : int = pipeline.to(A ) pipeline.set_progress_bar_config(disable=A ) UpperCAmelCase__ : Optional[Any] = """A robot, 4k photo""" UpperCAmelCase__ : List[Any] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ , UpperCAmelCase__ : Tuple = pipe_prior( A ,generator=A ,num_inference_steps=5 
,negative_prompt="""""" ,).to_tuple() UpperCAmelCase__ : List[str] = torch.Generator(device="""cuda""" ).manual_seed(0 ) UpperCAmelCase__ : int = pipeline( image_embeds=A ,negative_image_embeds=A ,hint=A ,generator=A ,num_inference_steps=100 ,output_type="""np""" ,) UpperCAmelCase__ : Any = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A ,A )
65
'''simple docstring''' import math import random def sigmoid_function( value: float, deriv: bool = False ): if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value INITIAL_VALUE = 0.02 def forward_propagation( expected: int, number_propagations: int ): weight = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(number_propagations ): # Forward propagation layer_1 = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? layer_1_error = (expected / 100) - layer_1 # Error delta: the derivative form takes the activation itself as input layer_1_delta = layer_1_error * sigmoid_function(layer_1, deriv=True ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_1 * 100 if __name__ == "__main__": import doctest doctest.testmod() expected = int(input("Expected value: ")) number_propagations = int(input("Number of propagations: ")) print(forward_propagation(expected, number_propagations))
131
0
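The forward-propagation snippet in this row relies on the identity sigma'(x) = sigma(x) * (1 - sigma(x)); its deriv=True branch therefore expects the activation sigma(x) as input, not x. A finite-difference check of that identity (standalone, nothing assumed from the snippet beyond the formula):

import math

def sigmoid(x: float) -> float:
    return 1.0 / (1.0 + math.exp(-x))

def sigmoid_derivative_from_activation(s: float) -> float:
    # Takes sigma(x) and returns sigma'(x) = s * (1 - s).
    return s * (1.0 - s)

eps = 1e-6
for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
    analytic = sigmoid_derivative_from_activation(sigmoid(x))
    assert abs(numeric - analytic) < 1e-6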
def heaps( arr ): '''simple docstring''' if len(arr ) <= 1: return [tuple(arr )] res = [] def generate( n , arr ): c = [0] * n res.append(tuple(arr ) ) i = 0 while i < n: if c[i] < i: if i % 2 == 0: arr[0], arr[i] = arr[i], arr[0] else: arr[c[i]], arr[i] = arr[i], arr[c[i]] res.append(tuple(arr ) ) c[i] += 1 i = 0 else: c[i] = 0 i += 1 generate(len(arr ) , arr ) return res if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() arr = [int(item) for item in user_input.split(",")] print(heaps(arr))
658
import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def A ( _lowerCamelCase , _lowerCamelCase=False ): '''simple docstring''' _lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase ) if display: print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) ) return config def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ): '''simple docstring''' if conf_path is None: _lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml" _lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase ) _lowerCAmelCase : str = VQModel(**config.model.params ) if ckpt_path is None: _lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt" _lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase ) if ".ckpt" in ckpt_path: _lowerCAmelCase : List[Any] = sd["state_dict"] model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase ) model.to(_lowerCamelCase ) del sd return model def A ( _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase ) print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" ) _lowerCAmelCase : int = model.decode(_lowerCamelCase ) return xrec def A ( _lowerCamelCase , _lowerCamelCase=False ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 ) if reload: _lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase ) importlib.reload(_lowerCamelCase ) return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls ) def A ( _lowerCamelCase ): '''simple docstring''' if "target" not in config: raise KeyError("Expected key `target` to instantiate." ) return get_obj_from_str(config["target"] )(**config.get("params" , {} ) ) def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ): '''simple docstring''' _lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase ) if sd is not None: model.load_state_dict(_lowerCamelCase ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' if ckpt: _lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" ) _lowerCAmelCase : int = pl_sd["global_step"] print(F"loaded model from global step {global_step}." ) else: _lowerCAmelCase : Optional[int] = {"state_dict": None} _lowerCAmelCase : Any = None _lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"] return model, global_step
658
1
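Heap's algorithm above produces all n! permutations, each obtained from the previous one by a single transposition. A self-contained check of both properties against itertools.permutations (the function is restated here so the snippet runs on its own):

from itertools import permutations

def heaps(arr):
    # Iterative Heap's algorithm: each output differs from the previous by one swap.
    res = [tuple(arr)]
    n = len(arr)
    c = [0] * n
    i = 0
    while i < n:
        if c[i] < i:
            if i % 2 == 0:
                arr[0], arr[i] = arr[i], arr[0]
            else:
                arr[c[i]], arr[i] = arr[i], arr[c[i]]
            res.append(tuple(arr))
            c[i] += 1
            i = 0
        else:
            c[i] = 0
            i += 1
    return res

out = heaps([1, 2, 3, 4])
assert len(out) == 24 and set(out) == set(permutations([1, 2, 3, 4]))
# Consecutive permutations differ in exactly two positions (one transposition).
for prev, cur in zip(out, out[1:]):
    assert sum(a != b for a, b in zip(prev, cur)) == 2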
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def a_ ( UpperCamelCase_ ): A_ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase_ , UpperCamelCase_ ) def a_ ( UpperCamelCase_ ): A_ , A_ = emb.weight.shape A_ = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ ) A_ = emb.weight.data return lin_layer def a_ ( UpperCamelCase_ , UpperCamelCase_=None ): A_ = {} for old_key in state_dict.keys(): A_ = old_key if "moe_layer.experts." in key: if expert_idx is not None: A_ = key.replace("moe_layer.experts.0" , f"ffn.experts.expert_{expert_idx}" ) else: A_ = key.replace("moe_layer.experts." , "ffn.experts.expert_" ) if "gate" in key: A_ = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" ) if "fc2" and "experts" not in key: A_ = key.replace(".fc2." , ".ffn.fc2." ) if "fc1" and "experts" not in key: A_ = key.replace(".fc1." , ".ffn.fc1." ) if ".encoder_attn." in key: A_ = key.replace(".encoder_attn." , ".cross_attention." ) if "encoder_attn_layer_norm" in key: A_ = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" ) if "final_layer_norm" in key: A_ = key.replace("final_layer_norm" , "ff_layer_norm" ) A_ = state_dict[old_key] return new_dict def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = WEIGHTS_NAME ): A_ = [] A_ = 0 os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) for expert in range(UpperCamelCase_ ): A_ = switch_checkpoint_path + f"-rank-{expert}.pt" if os.path.isfile(UpperCamelCase_ ): A_ = torch.load(UpperCamelCase_ )["model"] remove_ignore_keys_(UpperCamelCase_ ) A_ = rename_fairseq_keys(UpperCamelCase_ , UpperCamelCase_ ) A_ = os.path.join( UpperCamelCase_ , weights_name.replace(".bin" , f"-{len(UpperCamelCase_ )+1:05d}-of-???.bin" ) ) torch.save(UpperCamelCase_ , UpperCamelCase_ ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(UpperCamelCase_ )[0]].dtype ) # Add the last block A_ = os.path.join(UpperCamelCase_ , weights_name.replace(".bin" , f"-{len(UpperCamelCase_ )+1:05d}-of-???.bin" ) ) A_ = torch.load(switch_checkpoint_path + "-shared.pt" )["model"] remove_ignore_keys_(UpperCamelCase_ ) A_ = rename_fairseq_keys(UpperCamelCase_ , UpperCamelCase_ ) A_ = shared_weights["decoder.embed_tokens.weight"] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy model/experts saved on the same file) if len(UpperCamelCase_ ) == 1: A_ = os.path.join(UpperCamelCase_ , UpperCamelCase_ ) torch.save(UpperCamelCase_ , UpperCamelCase_ ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(UpperCamelCase_ , UpperCamelCase_ ) # Otherwise, let's build the index A_ = {} for idx, shard in enumerate(UpperCamelCase_ ): A_ = weights_name.replace(".bin" , f"-{idx+1:05d}-of-{len(UpperCamelCase_ ):05d}.bin" ) A_ = os.path.join(UpperCamelCase_ , weights_name.replace(".bin" , f"-{idx+1:05d}-of-???.bin" ) ) os.rename(UpperCamelCase_ , os.path.join(UpperCamelCase_ , UpperCamelCase_ ) ) for key in shard: A_ = 
shard_file # Add the metadata A_ = {"total_size": total_size} A_ = {"metadata": metadata, "weight_map": weight_map} with open(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , "w" , encoding="utf-8" ) as f: A_ = json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + "\n" f.write(UpperCamelCase_ ) return metadata, index if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--nllb_moe_checkpoint_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''', type=str, required=False, help='''Path to a directory containing a folder per layer. Follows the original Google format.''', ) parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''') parser.add_argument( '''--pytorch_dump_folder_path''', default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''', type=str, required=False, help='''Path to the output pytorch model.''', ) __SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 128, args.dtype, ) __SCREAMING_SNAKE_CASE : Optional[Any] = NllbMoeConfig.from_pretrained( '''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128 ) config.save_pretrained(args.pytorch_dump_folder_path) __SCREAMING_SNAKE_CASE : List[str] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print('''Done''') model.save_pretrained(args.pytorch_dump_folder_path)
452
'''simple docstring''' import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser __SCREAMING_SNAKE_CASE : Optional[int] = logging.getLogger(__name__) torch.set_grad_enabled(False) __SCREAMING_SNAKE_CASE : Dict = '''cuda''' if torch.cuda.is_available() else '''cpu''' def a_ ( UpperCamelCase_ , UpperCamelCase_=1_0_0 , UpperCamelCase_=" " ): A_ = text.split(UpperCamelCase_ ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ )] def a_ ( UpperCamelCase_ ): A_ , A_ = [], [] for title, text in zip(documents["title"] , documents["text"] ): if text is not None: for passage in split_text(UpperCamelCase_ ): titles.append(title if title is not None else "" ) texts.append(UpperCamelCase_ ) return {"title": titles, "text": texts} def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): A_ = ctx_tokenizer( documents["title"] , documents["text"] , truncation=UpperCamelCase_ , padding="longest" , return_tensors="pt" )["input_ids"] A_ = ctx_encoder(input_ids.to(device=UpperCamelCase_ ) , return_dict=UpperCamelCase_ ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ): ###################################### logger.info("Step 1 - Create the dataset" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way A_ = load_dataset( "csv" , data_files=[rag_example_args.csv_path] , split="train" , delimiter="\t" , column_names=["title", "text"] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words A_ = dataset.map(UpperCamelCase_ , batched=UpperCamelCase_ , num_proc=processing_args.num_proc ) # And compute the embeddings A_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCamelCase_ ) A_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) A_ = Features( {"text": Value("string" ), "title": Value("string" ), "embeddings": Sequence(Value("float32" ) )} ) # optional, save as float32 instead of float64 to save space A_ = dataset.map( partial(UpperCamelCase_ , ctx_encoder=UpperCamelCase_ , ctx_tokenizer=UpperCamelCase_ ) , batched=UpperCamelCase_ , batch_size=processing_args.batch_size , features=UpperCamelCase_ , ) # And finally save your dataset A_ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset" ) dataset.save_to_disk(UpperCamelCase_ ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - Index the dataset" ) ###################################### # Let's use the Faiss implementation 
of HNSW for fast approximate nearest neighbor search A_ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("embeddings" , custom_index=UpperCamelCase_ ) # And save the index A_ = os.path.join(rag_example_args.output_dir , "my_knowledge_dataset_hnsw_index.faiss" ) dataset.get_index("embeddings" ).save(UpperCamelCase_ ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __lowerCAmelCase : """simple docstring""" _UpperCAmelCase : str =field( default=str(Path(lowercase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) _UpperCAmelCase : Optional[str] =field( default=lowercase , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) _UpperCAmelCase : str =field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) _UpperCAmelCase : str =field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) _UpperCAmelCase : Optional[str] =field( default=str(Path(lowercase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class __lowerCAmelCase : """simple docstring""" _UpperCAmelCase : Optional[int] =field( default=lowercase , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) _UpperCAmelCase : int =field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class __lowerCAmelCase : """simple docstring""" _UpperCAmelCase : int =field( default=768 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) _UpperCAmelCase : int =field( default=128 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) __SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Dict = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE : List[str] = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
452
1
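The NLLB-MoE conversion in this row finishes by writing the standard Transformers sharded-checkpoint index: a JSON file with a metadata.total_size entry and a weight_map from parameter name to shard file. A toy illustration of building that structure (the tensors and shard file names are invented; real shards follow the pytorch_model-XXXXX-of-XXXXX.bin pattern seen in the code):

import json
import torch

# Pretend the model was split into two shards.
shards = {
    "pytorch_model-00001-of-00002.bin": {"encoder.weight": torch.zeros(4, 4)},
    "pytorch_model-00002-of-00002.bin": {"decoder.weight": torch.zeros(4, 4)},
}

weight_map = {}
total_size = 0
for shard_file, state_dict in shards.items():
    for name, tensor in state_dict.items():
        weight_map[name] = shard_file                          # parameter -> shard file
        total_size += tensor.numel() * tensor.element_size()   # bytes across all shards

index = {"metadata": {"total_size": total_size}, "weight_map": weight_map}
print(json.dumps(index, indent=2, sort_keys=True))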
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_ = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_ = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
466
'''simple docstring''' from __future__ import annotations import time import numpy as np SCREAMING_SNAKE_CASE_ = [8, 5, 9, 7] SCREAMING_SNAKE_CASE_ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] SCREAMING_SNAKE_CASE_ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class a : """simple docstring""" def __init__( self , snake_case_ , snake_case_ , snake_case_ , ): '''simple docstring''' __UpperCAmelCase: str = claim_vector __UpperCAmelCase: Optional[Any] = allocated_resources_table __UpperCAmelCase: List[str] = maximum_claim_table def lowercase_ ( self ): '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def lowercase_ ( self ): '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def lowercase_ ( self ): '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(snake_case_ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def lowercase_ ( self ): '''simple docstring''' return {self.__need().index(snake_case_ ): i for i in self.__need()} def lowercase_ ( self , **snake_case_ ): '''simple docstring''' __UpperCAmelCase: Dict = self.__need() __UpperCAmelCase: Optional[Any] = self.__allocated_resources_table __UpperCAmelCase: int = self.__available_resources() __UpperCAmelCase: List[Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: __UpperCAmelCase: Any = False for each_need in need_list: __UpperCAmelCase: Any = True for index, need in enumerate(snake_case_ ): if need > available_resources[index]: __UpperCAmelCase: Tuple = False break if execution: __UpperCAmelCase: Optional[int] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __UpperCAmelCase: Tuple = original_need_index print(F'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(snake_case_ ) # update available/freed resources stack __UpperCAmelCase: Dict = np.array(snake_case_ ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(snake_case_ ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def lowercase_ ( self ): '''simple docstring''' print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( F'''P{self.__allocated_resources_table.index(snake_case_ ) + 1}''' + """ """.join(F'''{it:>8}''' for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( F'''P{self.__maximum_claim_table.index(snake_case_ ) + 1}''' + """ """.join(F'''{it:>8}''' for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(snake_case_ ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(snake_case_ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
466
1
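The banker's-algorithm class above declares the system safe when every process can, in some order, satisfy its remaining need (maximum claim minus current allocation) from the available pool and then release what it holds. The same check in compact functional form, reusing the module's own example tables:

claim_vector = [8, 5, 9, 7]
allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maximum = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]

def is_safe(claim, alloc, max_claim):
    # available = total claim minus everything currently allocated (per resource)
    available = [c - sum(col) for c, col in zip(claim, zip(*alloc))]
    need = [[m - a for m, a in zip(m_row, a_row)] for m_row, a_row in zip(max_claim, alloc)]
    finished = [False] * len(alloc)
    order = []
    progressed = True
    while progressed:
        progressed = False
        for p in range(len(alloc)):
            if not finished[p] and all(n <= av for n, av in zip(need[p], available)):
                # Process p can run to completion and release its allocation.
                available = [av + a for av, a in zip(available, alloc[p])]
                finished[p] = True
                order.append(p)
                progressed = True
    return all(finished), order

safe, order = is_safe(claim_vector, allocated, maximum)
print("safe" if safe else "unsafe", order)  # expected: safe, e.g. order [2, 0, 1, 3, 4]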
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=3 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=9_9 , _lowerCAmelCase=3_2 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=3_7 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=5_1_2 , _lowerCAmelCase=1_6 , _lowerCAmelCase=2 , _lowerCAmelCase=0.02 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): _lowercase : Dict = parent _lowercase : Optional[int] = batch_size _lowercase : List[str] = seq_length _lowercase : List[str] = is_training _lowercase : Optional[int] = use_input_mask _lowercase : Union[str, Any] = use_token_type_ids _lowercase : List[str] = use_labels _lowercase : Tuple = vocab_size _lowercase : Tuple = hidden_size _lowercase : Any = num_hidden_layers _lowercase : Union[str, Any] = num_attention_heads _lowercase : Dict = intermediate_size _lowercase : List[str] = hidden_act _lowercase : Any = hidden_dropout_prob _lowercase : Dict = attention_probs_dropout_prob _lowercase : Optional[int] = max_position_embeddings _lowercase : Optional[Any] = type_vocab_size _lowercase : str = type_sequence_label_size _lowercase : str = initializer_range _lowercase : List[Any] = num_labels _lowercase : List[str] = num_choices _lowercase : Union[str, Any] = scope def __a ( self ): _lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowercase : str = None if self.use_input_mask: _lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) _lowercase : str = None _lowercase : List[str] = None _lowercase : Optional[int] = None _lowercase : List[str] = None if self.use_labels: _lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) _lowercase : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self ): return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_lowerCAmelCase , ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): 
_lowercase : Dict = FalconModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) _lowercase : str = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowercase : Union[str, Any] = True _lowercase : str = FalconModel(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Any = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , ) _lowercase : List[Any] = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , ) _lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowercase : Union[str, Any] = FalconForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): _lowercase : Optional[Any] = True _lowercase : Dict = True _lowercase : Optional[int] = FalconForCausalLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # first forward pass _lowercase : Tuple = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , use_cache=_lowerCAmelCase , ) _lowercase : Optional[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowercase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowercase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowercase : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) _lowercase : Tuple = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0] _lowercase : Any = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , encoder_attention_mask=_lowerCAmelCase , past_key_values=_lowerCAmelCase , output_hidden_states=_lowerCAmelCase , )['hidden_states'][0] # select random slice _lowercase : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowercase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() _lowercase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_lowerCAmelCase 
, _lowerCAmelCase , atol=1E-3 ) ) def __a ( self ): _lowercase : Optional[int] = self.prepare_config_and_inputs() ( ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ( _lowercase ) , ) : Union[str, Any] = config_and_inputs _lowercase : List[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCamelCase : Union[str, Any] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _UpperCamelCase : Optional[Any] = (FalconForCausalLM,) if is_torch_available() else () _UpperCamelCase : str = ( { "feature-extraction": FalconModel, "text-classification": FalconForSequenceClassification, "text-generation": FalconForCausalLM, "question-answering": FalconForQuestionAnswering, "token-classification": FalconForTokenClassification, "zero-shot": FalconForSequenceClassification, } if is_torch_available() else {} ) _UpperCamelCase : Tuple = False _UpperCamelCase : str = False def __a ( self ): _lowercase : Dict = FalconModelTester(self ) _lowercase : Dict = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 ) def __a ( self ): self.config_tester.run_common_tests() def __a ( self ): _lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def __a ( self ): _lowercase , *_lowercase : Dict = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: _lowercase : Union[str, Any] = alibi self.model_tester.create_and_check_model(_lowerCAmelCase , *_lowerCAmelCase ) def __a ( self ): _lowercase , _lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Optional[Any] = 3 _lowercase : int = input_dict['input_ids'] _lowercase : List[Any] = input_ids.ne(1 ).to(_lowerCAmelCase ) _lowercase : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _lowercase : List[str] = FalconForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Tuple = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __a ( self ): _lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : List[Any] = 3 _lowercase : Tuple = 'single_label_classification' _lowercase : Dict = input_dict['input_ids'] _lowercase : Union[str, Any] = input_ids.ne(1 ).to(_lowerCAmelCase ) _lowercase : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) _lowercase : Tuple = FalconForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __a ( self ): _lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : Optional[Any] = input_dict['input_ids'] _lowercase : Union[str, Any] = FalconForCausalLM(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : List[Any] = model(_lowerCAmelCase , use_cache=_lowerCAmelCase ) _lowercase : int = 
input_ids.shape[0] _lowercase : str = model._convert_to_rw_cache(result.past_key_values ) _lowercase : List[str] = model._convert_cache_to_standard_format(_lowerCAmelCase , _lowerCAmelCase ) for layer in range(len(_lowerCAmelCase ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def __a ( self ): _lowercase , _lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _lowercase : List[str] = 3 _lowercase : List[Any] = 'multi_label_classification' _lowercase : Union[str, Any] = input_dict['input_ids'] _lowercase : int = input_ids.ne(1 ).to(_lowerCAmelCase ) _lowercase : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) _lowercase : Dict = FalconForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() _lowercase : Optional[Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , labels=_lowerCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __a ( self ): # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: _lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(_lowerCAmelCase , 'use_cache' ): return _lowercase : Any = model_class(_lowerCAmelCase ).to(_lowerCAmelCase ) if "use_cache" not in inputs: _lowercase : Optional[int] = True _lowercase : Tuple = model(**_lowerCAmelCase ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return _lowercase : str = ( getattr(_lowerCAmelCase , 'decoder_layers' , _lowerCAmelCase ) or getattr(_lowerCAmelCase , 'num_decoder_layers' , _lowerCAmelCase ) or config.num_hidden_layers ) _lowercase : Tuple = getattr(_lowerCAmelCase , 'num_kv_heads' , config.num_attention_heads ) _lowercase : Any = getattr(_lowerCAmelCase , 'd_model' , config.hidden_size ) _lowercase : List[Any] = embed_dim // num_attention_heads _lowercase : List[str] = outputs['past_key_values'] self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) _lowercase , _lowercase : Optional[Any] = inputs['input_ids'].shape for i in range(_lowerCAmelCase ): if config.new_decoder_architecture: _lowercase : Dict = config.num_attention_heads elif config.multi_query: _lowercase : Dict = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): @slow def __a ( self ): _lowercase : List[str] = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' ) _lowercase : Any = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' ) model.eval() model.to(_lowerCAmelCase ) _lowercase : Union[str, Any] = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase ) _lowercase : Tuple = ( 'My favorite food is pizza. 
I love it so much that I have a pizza party every year for my birthday.' ) _lowercase : Optional[Any] = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=1_9 ) _lowercase : Tuple = tokenizer.batch_decode(_lowerCAmelCase )[0] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) @slow def __a ( self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: _lowercase : List[str] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) _lowercase : str = FalconForCausalLM.from_pretrained(_lowerCAmelCase ) model.eval() model.to(_lowerCAmelCase ) _lowercase : str = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=4 ) model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=4 ) model.generate(**_lowerCAmelCase , num_beams=2 , max_new_tokens=4 ) @slow def __a ( self ): # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: _lowercase : str = AutoTokenizer.from_pretrained(_lowerCAmelCase ) _lowercase : Union[str, Any] = FalconForCausalLM.from_pretrained(_lowerCAmelCase ) model.eval() model.to(device=_lowerCAmelCase ) _lowercase : Any = tokenizer('My favorite food is' , return_tensors='pt' ).to(_lowerCAmelCase ) # Test results are the same with and without cache _lowercase : str = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=2_0 , use_cache=_lowerCAmelCase ) _lowercase : Dict = model.generate(**_lowerCAmelCase , do_sample=_lowerCAmelCase , max_new_tokens=2_0 , use_cache=_lowerCAmelCase ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
66
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        # Insert in descending order at the head, so the list reads ascending.
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(
    sll_one: SortedLinkedList, sll_two: SortedLinkedList
) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
285
0
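A quick check of the sorted-linked-list sample above, using its own test data (same module or session assumed):

odd = SortedLinkedList(test_data_odd)
even = SortedLinkedList(test_data_even)
merged = merge_lists(odd, even)
print(len(merged))  # 16
print(merged)  # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10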
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase : Optional[Any] = ( """https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py""" ) lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name def UpperCAmelCase_ ( ): lowerCamelCase_: Union[str, Any] = """https://pypi.org/pypi/diffusers/json""" lowerCamelCase_: Optional[int] = json.loads(request.urlopen(_UpperCAmelCase ).read() )["""releases"""].keys() return sorted(_UpperCAmelCase , key=lambda _UpperCAmelCase : version.Version(_UpperCAmelCase ) ) def UpperCAmelCase_ ( ): # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(_UpperCAmelCase ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) lowerCamelCase_: List[str] = Path(_UpperCAmelCase ) / """__init__.py""" if not init_path.exists(): init_path.touch() def UpperCAmelCase_ ( _UpperCAmelCase ): init_hf_modules() lowerCamelCase_: Optional[int] = Path(_UpperCAmelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase ) lowerCamelCase_: Union[str, Any] = dynamic_module_path / """__init__.py""" if not init_path.exists(): init_path.touch() def UpperCAmelCase_ ( _UpperCAmelCase ): with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase_: Union[str, Any] = f.read() # Imports of the form `import .xxx` lowerCamelCase_: Optional[int] = re.findall("""^\s*import\s+\.(\S+)\s*$""" , _UpperCAmelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall("""^\s*from\s+\.(\S+)\s+import""" , _UpperCAmelCase , flags=re.MULTILINE ) # Unique-ify return list(set(_UpperCAmelCase ) ) def UpperCAmelCase_ ( _UpperCAmelCase ): lowerCamelCase_: Any = False lowerCamelCase_: Dict = [module_file] lowerCamelCase_: int = [] # Let's recurse through all relative imports while not no_change: lowerCamelCase_: Any = [] for f in files_to_check: new_imports.extend(get_relative_imports(_UpperCAmelCase ) ) lowerCamelCase_: str = Path(_UpperCAmelCase ).parent lowerCamelCase_: List[str] = [str(module_path / m ) for m in new_imports] lowerCamelCase_: Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports] lowerCamelCase_: List[Any] = [f"""{f}.py""" for f in new_import_files] lowerCamelCase_: Tuple = len(_UpperCAmelCase ) == 0 all_relative_imports.extend(_UpperCAmelCase ) return all_relative_imports def UpperCAmelCase_ ( _UpperCAmelCase ): with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f: lowerCamelCase_: Union[str, Any] = f.read() # Imports of the form `import xxx` lowerCamelCase_: List[str] = re.findall("""^\s*import\s+(\S+)\s*$""" , _UpperCAmelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall("""^\s*from\s+(\S+)\s+import""" , _UpperCAmelCase , flags=re.MULTILINE ) # Only keep the top-level module lowerCamelCase_: List[str] = [imp.split(""".""" )[0] for imp in imports if not imp.startswith(""".""" 
)] # Unique-ify and test we got them all lowerCamelCase_: Union[str, Any] = list(set(_UpperCAmelCase ) ) lowerCamelCase_: str = [] for imp in imports: try: importlib.import_module(_UpperCAmelCase ) except ImportError: missing_packages.append(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: raise ImportError( """This modeling file requires the following packages that were not found in your environment: """ f"""{', '.join(_UpperCAmelCase )}. Run `pip install {' '.join(_UpperCAmelCase )}`""" ) return get_relative_imports(_UpperCAmelCase ) def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase ): lowerCamelCase_: List[str] = module_path.replace(os.path.sep , """.""" ) lowerCamelCase_: Optional[Any] = importlib.import_module(_UpperCAmelCase ) if class_name is None: return find_pipeline_class(_UpperCAmelCase ) return getattr(_UpperCAmelCase , _UpperCAmelCase ) def UpperCAmelCase_ ( _UpperCAmelCase ): from ..pipelines import DiffusionPipeline lowerCamelCase_: int = dict(inspect.getmembers(_UpperCAmelCase , inspect.isclass ) ) lowerCamelCase_: int = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , _UpperCAmelCase ) and cls.__module__.split(""".""" )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:""" f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in""" f""" {loaded_module}.""" ) lowerCamelCase_: Any = cls return pipeline_class def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ): lowerCamelCase_: Dict = str(_UpperCAmelCase ) lowerCamelCase_: str = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ): lowerCamelCase_: str = module_file_or_url lowerCamelCase_: str = """local""" elif pretrained_model_name_or_path.count("""/""" ) == 0: lowerCamelCase_: int = get_diffusers_versions() # cut ".dev0" lowerCamelCase_: Optional[int] = """v""" + """.""".join(__version__.split(""".""" )[:3] ) # retrieve github version that matches if revision is None: lowerCamelCase_: List[Any] = latest_version if latest_version[1:] in available_versions else """main""" logger.info(f"""Defaulting to latest_version: {revision}.""" ) elif revision in available_versions: lowerCamelCase_: Union[str, Any] = f"""v{revision}""" elif revision == "main": lowerCamelCase_: Union[str, Any] = revision else: raise ValueError( f"""`custom_revision`: {revision} does not exist. 
Please make sure to choose one of""" f""" {', '.join(available_versions + ['main'] )}.""" ) # community pipeline on GitHub lowerCamelCase_: Optional[Any] = COMMUNITY_PIPELINES_URL.format(revision=_UpperCAmelCase , pipeline=_UpperCAmelCase ) try: lowerCamelCase_: str = cached_download( _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , ) lowerCamelCase_: Tuple = """git""" lowerCamelCase_: List[Any] = pretrained_model_name_or_path + """.py""" except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise else: try: # Load from URL or cache if already cached lowerCamelCase_: Optional[Any] = hf_hub_download( _UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , proxies=_UpperCAmelCase , resume_download=_UpperCAmelCase , local_files_only=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , ) lowerCamelCase_: Union[str, Any] = os.path.join("""local""" , """--""".join(pretrained_model_name_or_path.split("""/""" ) ) ) except EnvironmentError: logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" ) raise # Check we have all the requirements in our environment lowerCamelCase_: List[str] = check_imports(_UpperCAmelCase ) # Now we move the module inside our cached dynamic modules. lowerCamelCase_: List[Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(_UpperCAmelCase ) lowerCamelCase_: List[str] = Path(_UpperCAmelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(_UpperCAmelCase , submodule_path / module_file ) for module_needed in modules_needed: lowerCamelCase_: Dict = f"""{module_needed}.py""" shutil.copy(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(_UpperCAmelCase , _UpperCAmelCase ): lowerCamelCase_: Dict = use_auth_token elif use_auth_token is True: lowerCamelCase_: List[Any] = HfFolder.get_token() else: lowerCamelCase_: List[Any] = None lowerCamelCase_: int = model_info(_UpperCAmelCase , revision=_UpperCAmelCase , token=_UpperCAmelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. 
lowerCamelCase_: Optional[Any] = submodule_path / commit_hash lowerCamelCase_: List[str] = full_submodule + os.path.sep + commit_hash create_dynamic_module(_UpperCAmelCase ) if not (submodule_path / module_file).exists(): shutil.copy(_UpperCAmelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( _UpperCAmelCase , f"""{module_needed}.py""" , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , ) return os.path.join(_UpperCAmelCase , _UpperCAmelCase ) def UpperCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , **_UpperCAmelCase , ): lowerCamelCase_: Any = get_cached_module_file( _UpperCAmelCase , _UpperCAmelCase , cache_dir=_UpperCAmelCase , force_download=_UpperCAmelCase , resume_download=_UpperCAmelCase , proxies=_UpperCAmelCase , use_auth_token=_UpperCAmelCase , revision=_UpperCAmelCase , local_files_only=_UpperCAmelCase , ) return get_class_in_module(_UpperCAmelCase , final_module.replace(""".py""" , """""" ) )
713
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # Common tests that don't apply to this tokenizer are overridden as no-ops.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
584
0
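The dynamic-module loader above discovers relative imports by scanning source text with two regexes. A self-contained sketch of that idea (the patterns mirror the listing, written here as raw strings):

import re

content = "import os\nfrom .pipeline_utils import DiffusionPipeline\n"
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
print(sorted(set(relative)))  # ['pipeline_utils']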
"""simple docstring""" def a_ ( lowercase__ :Dict ): __lowerCamelCase = [] __lowerCamelCase = [] __lowerCamelCase = { """^""": 3, """*""": 2, """/""": 2, """%""": 2, """+""": 1, """-""": 1, } # Priority of each operator __lowerCamelCase = len(lowercase__ ) if (len(lowercase__ ) > 7) else 7 # Print table header for output print( """Symbol""".center(8 ), """Stack""".center(lowercase__ ), """Postfix""".center(lowercase__ ), sep=""" | """, ) print("""-""" * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(lowercase__ ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(lowercase__ ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(lowercase__ ) == 0: stack.append(lowercase__ ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(lowercase__ ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(lowercase__ ) # push x to stack print( x.center(8 ), ("""""".join(lowercase__ )).ljust(lowercase__ ), ("""""".join(lowercase__ )).ljust(lowercase__ ), sep=""" | """, ) # Output in tabular format while len(lowercase__ ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( """ """.center(8 ), ("""""".join(lowercase__ )).ljust(lowercase__ ), ("""""".join(lowercase__ )).ljust(lowercase__ ), sep=""" | """, ) # Output in tabular format return "".join(lowercase__ ) # return Postfix as str def a_ ( lowercase__ :List[str] ): __lowerCamelCase = list(infix[::-1] ) # reverse the infix equation for i in range(len(lowercase__ ) ): if infix[i] == "(": __lowerCamelCase = """)""" # change "(" to ")" elif infix[i] == ")": __lowerCamelCase = """(""" # change ")" to "(" return (infix_2_postfix("""""".join(lowercase__ ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": __magic_name__ : str = input('\nEnter an Infix Equation = ') # Input an Infix equation __magic_name__ : Optional[int] = ''.join(Infix.split()) # Remove spaces from the input print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
281
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ : List[str] = logging.get_logger(__name__) __magic_name__ : Tuple = { 'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json', 'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json', } class __snake_case (lowerCamelCase ): __a = '''encodec''' def __init__( self: Union[str, Any] , A_: Union[str, Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , A_: Any=2_40_00 , A_: List[Any]=1 , A_: Optional[Any]=False , A_: Optional[int]=None , A_: int=None , A_: List[str]=1_28 , A_: Union[str, Any]=32 , A_: List[str]=1 , A_: Dict=[8, 5, 4, 2] , A_: List[Any]="weight_norm" , A_: Any=7 , A_: List[Any]=7 , A_: Tuple=3 , A_: List[str]=2 , A_: Optional[Any]=True , A_: Optional[int]="reflect" , A_: Dict=2 , A_: Union[str, Any]=2 , A_: Union[str, Any]=1.0 , A_: List[str]=10_24 , A_: str=None , A_: List[str]=True , **A_: int , ): __lowerCamelCase = target_bandwidths __lowerCamelCase = sampling_rate __lowerCamelCase = audio_channels __lowerCamelCase = normalize __lowerCamelCase = chunk_length_s __lowerCamelCase = overlap __lowerCamelCase = hidden_size __lowerCamelCase = num_filters __lowerCamelCase = num_residual_layers __lowerCamelCase = upsampling_ratios __lowerCamelCase = norm_type __lowerCamelCase = kernel_size __lowerCamelCase = last_kernel_size __lowerCamelCase = residual_kernel_size __lowerCamelCase = dilation_growth_rate __lowerCamelCase = use_causal_conv __lowerCamelCase = pad_mode __lowerCamelCase = compress __lowerCamelCase = num_lstm_layers __lowerCamelCase = trim_right_ratio __lowerCamelCase = codebook_size __lowerCamelCase = codebook_dim if codebook_dim is not None else hidden_size __lowerCamelCase = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}' ) super().__init__(**A_ ) @property def __a ( self: Union[str, Any] ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def __a ( self: List[Any] ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def __a ( self: List[Any] ): __lowerCamelCase = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def __a ( self: List[Any] ): return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
281
1
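The infix-to-prefix converter above works by reversing the infix string, swapping parentheses, taking the postfix form, and reversing the result. A worked check (kept parenthesis-free so the trace stays short; the function also prints its conversion table):

# a+b*c  --reverse-->  c*b+a  --postfix-->  cb*a+  --reverse-->  +a*bc
print(infix_2_prefix("a+b*c"))  # +a*bc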
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu A = False class __lowercase ( unittest.TestCase ): '''simple docstring''' def _lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCamelCase ( self ): return 12 @property def _lowerCamelCase ( self ): return 12 @property def _lowerCamelCase ( self ): return 32 @property def _lowerCamelCase ( self ): torch.manual_seed(0 ) __a : Union[str, Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def _lowerCamelCase ( self ): __a : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def _lowerCamelCase ( self ): torch.manual_seed(0 ) __a : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(_UpperCAmelCase ) @property def _lowerCamelCase ( self ): torch.manual_seed(0 ) __a : str = 12 __a : Optional[Any] = 12 __a : Dict = { '''attention_bias''': True, '''cross_attention_dim''': 32, '''attention_head_dim''': height * width, '''num_attention_heads''': 1, '''num_vector_embeds''': self.num_embed, '''num_embeds_ada_norm''': self.num_embeds_ada_norm, '''norm_num_groups''': 32, '''sample_size''': width, '''activation_fn''': '''geglu-approximate''', } __a : Optional[Any] = TransformeraDModel(**_UpperCAmelCase ) return model def _lowerCamelCase ( self ): __a : List[str] = '''cpu''' __a : int = self.dummy_vqvae __a : int = self.dummy_text_encoder __a : List[Any] = self.dummy_tokenizer __a : Tuple = self.dummy_transformer __a : Union[str, Any] = VQDiffusionScheduler(self.num_embed ) __a : Optional[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_UpperCAmelCase ) __a : Optional[Any] = VQDiffusionPipeline( vqvae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , transformer=_UpperCAmelCase , scheduler=_UpperCAmelCase , learned_classifier_free_sampling_embeddings=_UpperCAmelCase , ) __a : str = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : Tuple = '''teddy bear playing in the pool''' __a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) __a : str = pipe([prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='''np''' ) __a : List[Any] = output.images __a : str = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) __a : int = pipe( [prompt] , generator=_UpperCAmelCase , output_type='''np''' , return_dict=_UpperCAmelCase , num_inference_steps=2 )[0] __a : Optional[Any] = image[0, -3:, -3:, -1] __a : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __a : Optional[Any] = np.array([0.6_5_5_1, 0.6_1_6_8, 
0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCamelCase ( self ): __a : Optional[Any] = '''cpu''' __a : Dict = self.dummy_vqvae __a : List[str] = self.dummy_text_encoder __a : List[str] = self.dummy_tokenizer __a : List[str] = self.dummy_transformer __a : List[str] = VQDiffusionScheduler(self.num_embed ) __a : List[str] = LearnedClassifierFreeSamplingEmbeddings( learnable=_UpperCAmelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __a : List[Any] = VQDiffusionPipeline( vqvae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , transformer=_UpperCAmelCase , scheduler=_UpperCAmelCase , learned_classifier_free_sampling_embeddings=_UpperCAmelCase , ) __a : List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __a : List[str] = '''teddy bear playing in the pool''' __a : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) __a : Dict = pipe([prompt] , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='''np''' ) __a : str = output.images __a : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) __a : Tuple = pipe( [prompt] , generator=_UpperCAmelCase , output_type='''np''' , return_dict=_UpperCAmelCase , num_inference_steps=2 )[0] __a : Any = image[0, -3:, -3:, -1] __a : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __a : Optional[int] = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): '''simple docstring''' def _lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self ): __a : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy''' ) __a : Dict = VQDiffusionPipeline.from_pretrained('''microsoft/vq-diffusion-ithq''' ) __a : Optional[Any] = pipeline.to(_UpperCAmelCase ) pipeline.set_progress_bar_config(disable=_UpperCAmelCase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) __a : str = pipeline( '''teddy bear playing in the pool''' , num_images_per_prompt=1 , generator=_UpperCAmelCase , output_type='''np''' , ) __a : Any = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
101
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping A = tuple[int, int] class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase ): __a : set[int] = vertices __a : dict[EdgeT, int] = { (min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items() } def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __a : Any = weight def _lowerCamelCase ( self ): __a : Graph = Graph({min(self.vertices )} , {} ) __a : EdgeT __a : int __a : EdgeT __a : int while len(subgraph.vertices ) < len(self.vertices ): __a : List[Any] = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __a : str = edge __a : int = weight subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase ) return subgraph def __A ( a_ :str = "p107_network.txt") -> int: __a : str = os.path.abspath(os.path.dirname(a_)) __a : str = os.path.join(a_ , a_) __a : dict[EdgeT, int] = {} __a : list[str] __a : int __a : int with open(a_) as f: __a : Any = f.read().strip().split('''\n''') __a : Union[str, Any] = [line.split(''',''') for line in data] for edgea in range(1 , len(a_)): for edgea in range(a_): if adjaceny_matrix[edgea][edgea] != "-": __a : int = int(adjaceny_matrix[edgea][edgea]) __a : Graph = Graph(set(range(len(a_))) , a_) __a : Graph = graph.prims_algorithm() __a : int = sum(graph.edges.values()) __a : int = sum(subgraph.edges.values()) return initial_total - optimal_total if __name__ == "__main__": print(F'{solution() = }')
101
1
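A toy run of the Graph/prims_algorithm pair above, with no data file needed:

g = Graph({0, 1, 2}, {(0, 1): 4, (1, 2): 1, (0, 2): 3})
mst = g.prims_algorithm()
print(sorted(mst.edges.items()))  # [((0, 2), 3), ((1, 2), 1)]
print(sum(mst.edges.values()))    # 4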
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def A_ (__a , __a=False ): '''simple docstring''' A_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'module.blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'module.blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'module.blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'module.blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'module.blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'module.blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'module.blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'module.blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'module.blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'module.blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A_ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def A_ (__a , __a , __a=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: A_ = "" else: A_ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ = state_dict.pop(f'module.blocks.{i}.attn.qkv.weight' ) A_ = state_dict.pop(f'module.blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict A_ = in_proj_weight[ : config.hidden_size, : ] A_ = in_proj_bias[: config.hidden_size] A_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ = in_proj_weight[ -config.hidden_size :, : ] A_ = in_proj_bias[-config.hidden_size :] def A_ (__a ): '''simple docstring''' A_ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(__a , __a ) def A_ (__a ): '''simple docstring''' A_ = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(__a , __a ) def A_ (__a , __a , __a ): '''simple docstring''' A_ = dct.pop(__a ) A_ = val def A_ (__a , __a ): '''simple docstring''' A_ = ViTMSNConfig() A_ = 1000 A_ = "datasets/huggingface/label-files" A_ = "imagenet-1k-id2label.json" A_ = json.load(open(hf_hub_download(__a , __a ) , "r" ) ) A_ = {int(__a ): v for k, v in idalabel.items()} A_ = idalabel A_ = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: A_ = 384 A_ = 1536 A_ = 6 elif "l16" in checkpoint_url: A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 A_ = 0.1 elif "b4" in checkpoint_url: A_ = 4 elif "l7" in checkpoint_url: A_ = 7 A_ = 1024 A_ = 4096 A_ = 24 A_ = 16 A_ = 0.1 A_ = ViTMSNModel(__a ) A_ = torch.hub.load_state_dict_from_url(__a , map_location="cpu" )["target_encoder"] A_ = ViTImageProcessor(size=config.image_size ) remove_projection_head(__a ) A_ = create_rename_keys(__a , base_model=__a ) for src, dest in rename_keys: rename_key(__a , __a , __a ) read_in_q_k_v(__a , __a , base_model=__a ) model.load_state_dict(__a ) model.eval() A_ = "http://images.cocodataset.org/val2017/000000039769.jpg" A_ = Image.open(requests.get(__a , stream=__a ).raw ) A_ = ViTImageProcessor( size=config.image_size , image_mean=__a , image_std=__a ) A_ = image_processor(images=__a , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) A_ = model(**__a ) A_ = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: A_ = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: A_ = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: A_ = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: A_ = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: A_ = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , __a , atol=1e-4 ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(__a ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__a ) if __name__ == "__main__": UpperCamelCase_ : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', 
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) UpperCamelCase_ : Dict = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
115
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase_ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase_ : List[Any] = 256 class __lowerCAmelCase ( _lowercase ): """simple docstring""" snake_case = ["melgan"] def __init__( self : Dict , _snake_case : SpectrogramNotesEncoder , _snake_case : SpectrogramContEncoder , _snake_case : TaFilmDecoder , _snake_case : DDPMScheduler , _snake_case : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: """simple docstring""" super().__init__() # From MELGAN A_ = math.log(1e-5 ) # Matches MelGAN training. A_ = 4.0 # Largest value for most examples A_ = 128 self.register_modules( notes_encoder=_snake_case , continuous_encoder=_snake_case , decoder=_snake_case , scheduler=_snake_case , melgan=_snake_case , ) def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : Tuple , _snake_case : str=(-1.0, 1.0) , _snake_case : int=False ) -> str: """simple docstring""" A_ , A_ = output_range if clip: A_ = torch.clip(_snake_case , self.min_value , self.max_value ) # Scale to [0, 1]. A_ = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def lowerCamelCase__ ( self : Dict , _snake_case : Tuple , _snake_case : Optional[Any]=(-1.0, 1.0) , _snake_case : List[str]=False ) -> List[str]: """simple docstring""" A_ , A_ = input_range A_ = torch.clip(_snake_case , _snake_case , _snake_case ) if clip else outputs # Scale to [0, 1]. A_ = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : str , _snake_case : List[Any] , _snake_case : Union[str, Any] ) -> List[str]: """simple docstring""" A_ = input_tokens > 0 A_ , A_ = self.notes_encoder( encoder_input_tokens=_snake_case , encoder_inputs_mask=_snake_case ) A_ , A_ = self.continuous_encoder( encoder_inputs=_snake_case , encoder_inputs_mask=_snake_case ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def lowerCamelCase__ ( self : List[Any] , _snake_case : List[Any] , _snake_case : int , _snake_case : Tuple ) -> Optional[int]: """simple docstring""" A_ = noise_time if not torch.is_tensor(_snake_case ): A_ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(_snake_case ) and len(timesteps.shape ) == 0: A_ = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML A_ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) A_ = self.decoder( encodings_and_masks=_snake_case , decoder_input_tokens=_snake_case , decoder_noise_time=_snake_case ) return logits @torch.no_grad() def __call__( self : List[Any] , _snake_case : List[List[int]] , _snake_case : Optional[torch.Generator] = None , _snake_case : int = 100 , _snake_case : bool = True , _snake_case : str = "numpy" , _snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _snake_case : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: """simple docstring""" if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_snake_case , _snake_case ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(_snake_case )}.' ) A_ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) A_ = np.zeros([1, 0, self.n_dims] , np.floataa ) A_ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_snake_case , device=self.device ) for i, encoder_input_tokens in enumerate(_snake_case ): if i == 0: A_ = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. A_ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_snake_case , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
A_ = ones A_ = self.scale_features( _snake_case , output_range=[-1.0, 1.0] , clip=_snake_case ) A_ = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_snake_case , continuous_mask=_snake_case , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop A_ = randn_tensor( shape=encoder_continuous_inputs.shape , generator=_snake_case , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(_snake_case ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): A_ = self.decode( encodings_and_masks=_snake_case , input_tokens=_snake_case , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 A_ = self.scheduler.step(_snake_case , _snake_case , _snake_case , generator=_snake_case ).prev_sample A_ = self.scale_to_features(_snake_case , input_range=[-1.0, 1.0] ) A_ = mel[:1] A_ = mel.cpu().float().numpy() A_ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_snake_case , _snake_case ) logger.info("Generated segment" , _snake_case ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." ) if output_type == "numpy": A_ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: A_ = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=_snake_case )
115
1
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
530
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
530
1
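Quick numeric checks for the resistor helpers above:

print(resistor_parallel([2, 2]))     # 1.0  (1 / (1/2 + 1/2))
print(resistor_series([2, 3, 4.5]))  # 9.5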
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = {"vocab_file": "sentencepiece.bpe.model"} _lowerCamelCase = { "vocab_file": { "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model", "moussaKam/barthez-orangesum-title": ( "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model" ), }, } _lowerCamelCase = { "moussaKam/mbarthez": 1024, "moussaKam/barthez": 1024, "moussaKam/barthez-orangesum-title": 1024, } _lowerCamelCase = "▁" class UpperCamelCase_ ( __lowercase ): lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = ["input_ids", "attention_mask"] def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Optional[int]="<s>" , __A :Optional[int]="</s>" , __A :List[Any]="</s>" , __A :Tuple="<s>" , __A :List[str]="<unk>" , __A :Any="<pad>" , __A :str="<mask>" , __A :Optional[Dict[str, Any]] = None , **__A :Optional[Any] , ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , ) SCREAMING_SNAKE_CASE__ = vocab_file SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) ) SCREAMING_SNAKE_CASE__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE__ = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def _snake_case ( self :List[Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] SCREAMING_SNAKE_CASE__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self :Tuple , __A :List[int] , __A :Optional[List[int]] = None , __A :bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def _snake_case ( self :Union[str, Any] , __A :List[int] , __A :Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + 
token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case ( self :List[str] ) -> Dict: """simple docstring""" return len(self.sp_model ) def _snake_case ( self :Optional[int] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case ( self :Dict , __A :str ) -> List[str]: """simple docstring""" return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self :Dict , __A :Tuple ) -> List[str]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE__ = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) return spm_id if spm_id else self.unk_token_id def _snake_case ( self :List[Any] , __A :Dict ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE_ ) def _snake_case ( self :Dict , __A :Optional[Any] ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = """""" SCREAMING_SNAKE_CASE__ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE__ = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) return out_string.strip() def __getstate__( self :List[str] ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ = self.__dict__.copy() SCREAMING_SNAKE_CASE__ = None return state def __setstate__( self :str , __A :List[Any] ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _snake_case ( self :Union[str, Any] , __A :str , __A :Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE__ = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
6
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    """Return the URL of a file inside a dataset repository on the Hugging Face Hub."""
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
56
0
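A minimal usage sketch for the `hf_hub_url` helper above; the repository id and file path are illustrative, not taken from the original module:

url = hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet", revision="main")
print(url)
# -> https://huggingface.co/datasets/squad/resolve/main/plain_text/train-00000-of-00001.parquet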
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        # Turn the raw waveform into Whisper log-mel input features.
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
700
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) class lowercase__ ( UpperCamelCase_): UpperCamelCase_ = ["""input_features""", """is_longer"""] def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=64 , UpperCamelCase__ : Optional[Any]=4_8000 , UpperCamelCase__ : Tuple=480 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[Any]=1024 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : int=False , UpperCamelCase__ : float = 0 , UpperCamelCase__ : float = 1_4000 , UpperCamelCase__ : int = None , UpperCamelCase__ : str = "fusion" , UpperCamelCase__ : str = "repeatpad" , **UpperCamelCase__ : Union[str, Any] , ): '''simple docstring''' super().__init__( feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) SCREAMING_SNAKE_CASE : Union[str, Any] = top_db SCREAMING_SNAKE_CASE : Union[str, Any] = truncation SCREAMING_SNAKE_CASE : str = padding SCREAMING_SNAKE_CASE : List[Any] = fft_window_size SCREAMING_SNAKE_CASE : Tuple = (fft_window_size >> 1) + 1 SCREAMING_SNAKE_CASE : List[str] = hop_length SCREAMING_SNAKE_CASE : List[Any] = max_length_s SCREAMING_SNAKE_CASE : Tuple = max_length_s * sampling_rate SCREAMING_SNAKE_CASE : List[Any] = sampling_rate SCREAMING_SNAKE_CASE : List[str] = frequency_min SCREAMING_SNAKE_CASE : Any = frequency_max SCREAMING_SNAKE_CASE : List[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm=UpperCamelCase__ , mel_scale='''htk''' , ) SCREAMING_SNAKE_CASE : Optional[Any] = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase__ , min_frequency=UpperCamelCase__ , max_frequency=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , norm='''slaney''' , mel_scale='''slaney''' , ) def __A ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def __A ( self : Optional[int] , UpperCamelCase__ : np.array , UpperCamelCase__ : Optional[np.array] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = spectrogram( UpperCamelCase__ , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase__ , log_mel='''dB''' , ) return log_mel_spectrogram.T def __A ( self : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE : int = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE : Any = [0] # randomly choose index for each part 
SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.choice(ranges[0] ) SCREAMING_SNAKE_CASE : List[Any] = np.random.choice(ranges[1] ) SCREAMING_SNAKE_CASE : int = np.random.choice(ranges[2] ) SCREAMING_SNAKE_CASE : Optional[int] = mel[idx_front : idx_front + chunk_frames, :] SCREAMING_SNAKE_CASE : Optional[Any] = mel[idx_middle : idx_middle + chunk_frames, :] SCREAMING_SNAKE_CASE : Tuple = mel[idx_back : idx_back + chunk_frames, :] SCREAMING_SNAKE_CASE : str = torch.tensor(mel[None, None, :] ) SCREAMING_SNAKE_CASE : Optional[int] = torch.nn.functional.interpolate( UpperCamelCase__ , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = mel_shrink[0][0].numpy() SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def __A ( self : Dict , UpperCamelCase__ : np.array , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ): '''simple docstring''' if waveform.shape[0] > max_length: if truncation == "rand_trunc": SCREAMING_SNAKE_CASE : Optional[Any] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ ) - max_length SCREAMING_SNAKE_CASE : Dict = np.random.randint(0 , overflow + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = waveform[idx : idx + max_length] SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :] elif truncation == "fusion": SCREAMING_SNAKE_CASE : Any = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters ) SCREAMING_SNAKE_CASE : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed SCREAMING_SNAKE_CASE : List[Any] = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. SCREAMING_SNAKE_CASE : Optional[Any] = np.stack([mel, mel, mel, mel] , axis=0 ) SCREAMING_SNAKE_CASE : Tuple = False else: SCREAMING_SNAKE_CASE : str = self._random_mel_fusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = True else: raise NotImplementedError(f"""data_truncating {truncation} not implemented""" ) else: SCREAMING_SNAKE_CASE : List[str] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": SCREAMING_SNAKE_CASE : Tuple = int(max_length / len(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE : Any = np.stack(np.tile(UpperCamelCase__ , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": SCREAMING_SNAKE_CASE : List[Any] = int(max_length / len(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE : Dict = np.stack(np.tile(UpperCamelCase__ , UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE : Dict = np.pad(UpperCamelCase__ , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": SCREAMING_SNAKE_CASE : List[Any] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: SCREAMING_SNAKE_CASE : List[str] = self._np_extract_fbank_features(UpperCamelCase__ , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : str = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Any , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = truncation if truncation is not None else self.truncation SCREAMING_SNAKE_CASE : List[str] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a""" f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input""" f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) SCREAMING_SNAKE_CASE : List[str] = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" ) SCREAMING_SNAKE_CASE : int = is_batched_numpy or ( isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ): SCREAMING_SNAKE_CASE : List[Any] = np.asarray(UpperCamelCase__ , dtype=np.floataa ) elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE : Union[str, Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE : List[str] = [np.asarray(UpperCamelCase__ )] # convert to mel spectrogram, truncate and pad if needed. 
SCREAMING_SNAKE_CASE : int = [ self._get_input_mel(UpperCamelCase__ , max_length if max_length else self.nb_max_samples , UpperCamelCase__ , UpperCamelCase__ ) for waveform in raw_speech ] SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : List[str] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase__ ) is_longer.append(UpperCamelCase__ ) if truncation == "fusion" and sum(UpperCamelCase__ ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer SCREAMING_SNAKE_CASE : Union[str, Any] = np.random.randint(0 , len(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = True if isinstance(input_mel[0] , UpperCamelCase__ ): SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool SCREAMING_SNAKE_CASE : Optional[Any] = [[longer] for longer in is_longer] SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_features''': input_mel, '''is_longer''': is_longer} SCREAMING_SNAKE_CASE : int = BatchFeature(UpperCamelCase__ ) if return_tensors is not None: SCREAMING_SNAKE_CASE : int = input_features.convert_to_tensors(UpperCamelCase__ ) return input_features
34
0
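A sketch of driving the transcriber tool above end to end; the one-second silent waveform is synthetic, and lazy model loading via `setup()` is an assumption about the `PipelineTool` base class rather than something shown in this file:

import numpy as np

tool = SpeechToTextTool()
tool.setup()  # assumption: the base class exposes setup() to load processor and model
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
print(tool(audio))  # __call__ chains encode -> forward -> decode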
from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker lowerCamelCase : List[Any] = 'CompVis/stable-diffusion-v1-1' lowerCamelCase : Union[str, Any] = 'CompVis/stable-diffusion-v1-2' lowerCamelCase : List[str] = 'CompVis/stable-diffusion-v1-3' lowerCamelCase : Any = 'CompVis/stable-diffusion-v1-4' class __lowercase (UpperCamelCase__ ): """simple docstring""" def __init__( self , A , A , A , A , A , A , A , A = True , ) -> List[str]: super()._init_() snake_case : List[Any] = StableDiffusionPipeline.from_pretrained(A ) snake_case : str = StableDiffusionPipeline.from_pretrained(A ) snake_case : Optional[int] = StableDiffusionPipeline.from_pretrained(A ) snake_case : Dict = StableDiffusionPipeline( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , requires_safety_checker=A , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase ( self ) -> Dict[str, Any]: return {k: getattr(self , A ) for k in self.config.keys() if not k.startswith("""_""" )} def UpperCAmelCase ( self , A = "auto" ) -> Tuple: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory snake_case : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def UpperCAmelCase ( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Optional[Any]: return self.pipea( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) @torch.no_grad() def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Optional[Any]: return self.pipea( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) @torch.no_grad() def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Any: return self.pipea( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) @torch.no_grad() def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Optional[int]: return self.pipea( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , 
num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) @torch.no_grad() def UpperCAmelCase ( self , A , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> List[Any]: snake_case : int = """cuda""" if torch.cuda.is_available() else """cpu""" self.to(A ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 snake_case : Tuple = self.textaimg_sda_a( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) # Get first result from Stable Diffusion Checkpoint v1.2 snake_case : Tuple = self.textaimg_sda_a( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) # Get first result from Stable Diffusion Checkpoint v1.3 snake_case : List[Any] = self.textaimg_sda_a( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) # Get first result from Stable Diffusion Checkpoint v1.4 snake_case : Dict = self.textaimg_sda_a( prompt=A , height=A , width=A , num_inference_steps=A , guidance_scale=A , negative_prompt=A , num_images_per_prompt=A , eta=A , generator=A , latents=A , output_type=A , return_dict=A , callback=A , callback_steps=A , **A , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
587
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=False ) -> List[Any]: try: snake_case : Tuple = os.environ[key] except KeyError: # KEY isn't set, default to `default`. snake_case : Tuple = default else: # KEY is set, convert it to True or False. try: snake_case : Tuple = strtobool(lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value lowerCamelCase : Tuple = parse_flag_from_env('RUN_SLOW', default=False) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]: return unittest.skip("""Test was skipped""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]: return unittest.skipUnless(_run_slow_tests ,"""test is slow""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any: return unittest.skipUnless(not torch.cuda.is_available() ,"""test requires only a CPU""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]: return unittest.skipUnless(torch.cuda.is_available() ,"""test requires a GPU""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]: return unittest.skipUnless(is_xpu_available() ,"""test requires a XPU""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]: return unittest.skipUnless(is_mps_available() ,"""test requires a `mps` backend support in `torch`""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]: return unittest.skipUnless( is_transformers_available() and is_datasets_available() ,"""test requires the Hugging Face suite""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]: return unittest.skipUnless(is_bnb_available() ,"""test requires the bitsandbytes library""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]: return unittest.skipUnless(is_tpu_available() ,"""test requires TPU""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any: return unittest.skipUnless(torch.cuda.device_count() == 1 ,"""test requires a GPU""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]: return unittest.skipUnless(torch.xpu.device_count() == 1 ,"""test requires a XPU""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict: return unittest.skipUnless(torch.cuda.device_count() > 1 ,"""test requires multiple GPUs""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[Any]: return unittest.skipUnless(torch.xpu.device_count() > 1 ,"""test requires multiple XPUs""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]: return unittest.skipUnless(is_safetensors_available() ,"""test requires safetensors""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Any: return unittest.skipUnless(is_deepspeed_available() ,"""test requires DeepSpeed""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]: return 
unittest.skipUnless(is_torch_version(""">=""" ,"""1.12.0""" ) ,"""test requires torch version >= 1.12.0""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase=None ,lowercase=None ) -> Optional[int]: if test_case is None: return partial(lowercase ,version=lowercase ) return unittest.skipUnless(is_torch_version(""">=""" ,lowercase ) ,f"""test requires torch version >= {version}""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple: return unittest.skipUnless(is_tensorboard_available() ,"""test requires Tensorboard""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]: return unittest.skipUnless(is_wandb_available() ,"""test requires wandb""" )(lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Union[str, Any]: return unittest.skipUnless(is_comet_ml_available() ,"""test requires comet_ml""" )(lowercase ) lowerCamelCase : Union[str, Any] = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]: return unittest.skipUnless( _atleast_one_tracker_available ,"""test requires at least one tracker to be available and for `comet_ml` to not be installed""" ,)(lowercase ) class __lowercase (unittest.TestCase ): """simple docstring""" _snake_case = True @classmethod def UpperCAmelCase ( cls ) -> int: snake_case : int = tempfile.mkdtemp() @classmethod def UpperCAmelCase ( cls ) -> str: if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def UpperCAmelCase ( self ) -> Tuple: if self.clear_on_setup: for path in Path(self.tmpdir ).glob("""**/*""" ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(A ) class __lowercase (unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self ) -> Optional[Any]: super().tearDown() # Reset the state of the AcceleratorState singleton. AcceleratorState._reset_state() PartialState._reset_state() class __lowercase (unittest.TestCase ): """simple docstring""" def UpperCAmelCase ( self , A ) -> Union[str, Any]: snake_case : List[str] = mocks if isinstance(A , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str: snake_case : Optional[int] = AcceleratorState() snake_case : int = tensor[None].clone().to(state.device ) snake_case : Dict = gather(lowercase ).cpu() snake_case : str = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] ,lowercase ): return False return True class __lowercase : """simple docstring""" def __init__( self , A , A , A ) -> Optional[int]: snake_case : Tuple = returncode snake_case : str = stdout snake_case : int = stderr async def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> str: while True: snake_case : Any = await stream.readline() if line: callback(lowercase ) else: break async def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ,lowercase=None ,lowercase=None ,lowercase=False ,lowercase=False ) -> _RunOutput: if echo: print("""\nRunning: """ ,""" """.join(lowercase ) ) snake_case : Optional[int] = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=lowercase ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. 
The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) snake_case : Dict = [] snake_case : Union[str, Any] = [] def tee(lowercase ,lowercase ,lowercase ,lowercase="" ): snake_case : str = line.decode("""utf-8""" ).rstrip() sink.append(lowercase ) if not quiet: print(lowercase ,lowercase ,file=lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout ,lambda lowercase : tee(lowercase ,lowercase ,sys.stdout ,label="""stdout:""" ) ) ), asyncio.create_task(_read_stream(p.stderr ,lambda lowercase : tee(lowercase ,lowercase ,sys.stderr ,label="""stderr:""" ) ) ), ] ,timeout=lowercase ,) return _RunOutput(await p.wait() ,lowercase ,lowercase ) def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=None ,lowercase=None ,lowercase=180 ,lowercase=False ,lowercase=True ) -> _RunOutput: snake_case : str = asyncio.get_event_loop() snake_case : Union[str, Any] = loop.run_until_complete( _stream_subprocess(lowercase ,env=lowercase ,stdin=lowercase ,timeout=lowercase ,quiet=lowercase ,echo=lowercase ) ) snake_case : List[str] = """ """.join(lowercase ) if result.returncode > 0: snake_case : List[Any] = """\n""".join(result.stderr ) raise RuntimeError( f"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) return result class __lowercase (UpperCamelCase__ ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=False ) -> List[str]: try: snake_case : List[str] = subprocess.check_output(lowercase ,stderr=subprocess.STDOUT ) if return_stdout: if hasattr(lowercase ,"""decode""" ): snake_case : List[str] = output.decode("""utf-8""" ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f"""Command `{" ".join(lowercase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
587
1
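A minimal sketch of the environment-flag helper at the top of the test-utilities module above; the helper is bound to the name `parse_flag_from_env` at its call site, and the env values here are illustrative:

import os

os.environ["RUN_SLOW"] = "yes"
assert parse_flag_from_env("RUN_SLOW", default=False)      # strtobool("yes") -> 1
os.environ["RUN_SLOW"] = "no"
assert not parse_flag_from_env("RUN_SLOW", default=True)   # strtobool("no") -> 0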
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''', '''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''', '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''', '''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''', } class _UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE : int = "funnel" __SCREAMING_SNAKE_CASE : Tuple = { "hidden_size": "d_model", "num_attention_heads": "n_head", } def __init__( self , lowercase_=3_0_5_2_2 , lowercase_=[4, 4, 4] , lowercase_=None , lowercase_=2 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=6_4 , lowercase_=3_0_7_2 , lowercase_="gelu_new" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=None , lowercase_=1E-9 , lowercase_="mean" , lowercase_="relative_shift" , lowercase_=True , lowercase_=True , lowercase_=True , **lowercase_ , ) -> Union[str, Any]: UpperCAmelCase = vocab_size UpperCAmelCase = block_sizes UpperCAmelCase = [1] * len(__snake_case ) if block_repeats is None else block_repeats assert len(__snake_case ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." UpperCAmelCase = num_decoder_layers UpperCAmelCase = d_model UpperCAmelCase = n_head UpperCAmelCase = d_head UpperCAmelCase = d_inner UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = initializer_range UpperCAmelCase = initializer_std UpperCAmelCase = layer_norm_eps assert pooling_type in [ "mean", "max", ], F"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported." UpperCAmelCase = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported." UpperCAmelCase = attention_type UpperCAmelCase = separate_cls UpperCAmelCase = truncate_seq UpperCAmelCase = pool_q_only super().__init__(**__snake_case ) @property def a_ ( self ) -> Any: return sum(self.block_sizes ) @num_hidden_layers.setter def a_ ( self , lowercase_ ) -> Optional[int]: raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' 
) @property def a_ ( self ) -> Optional[Any]: return len(self.block_sizes ) @num_blocks.setter def a_ ( self , lowercase_ ) -> Union[str, Any]: raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
705
"""simple docstring""" def lowercase__ ( lowerCAmelCase : int , lowerCAmelCase : int ) -> int: """simple docstring""" return int((input_a, input_a).count(0 ) == 0 ) def lowercase__ ( ) -> None: """simple docstring""" assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
183
0
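Since the gate module above only defines AND, a companion NAND can be sketched as a one-line negation (illustrative, not part of the original file):

def nand_gate(input_1: int, input_2: int) -> int:
    return int(not and_gate(input_1, input_2))

assert nand_gate(1, 1) == 0 and nand_gate(1, 0) == 1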
'''simple docstring''' from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowercase = logging.get_logger(__name__) __lowercase = { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json''' ), } class a__( lowerCAmelCase__ ): '''simple docstring''' UpperCAmelCase_ : int = '''xlm-prophetnet''' UpperCAmelCase_ : int = ['''past_key_values'''] UpperCAmelCase_ : List[Any] = { '''num_attention_heads''': '''num_encoder_attention_heads''', } def __init__( self , __lowerCAmelCase = 0.1 , __lowerCAmelCase = "gelu" , __lowerCAmelCase = 30522 , __lowerCAmelCase = 1024 , __lowerCAmelCase = 4096 , __lowerCAmelCase = 12 , __lowerCAmelCase = 16 , __lowerCAmelCase = 4096 , __lowerCAmelCase = 12 , __lowerCAmelCase = 16 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 512 , __lowerCAmelCase = 0.02 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = 0 , __lowerCAmelCase = 2 , __lowerCAmelCase = 32 , __lowerCAmelCase = 128 , __lowerCAmelCase = False , __lowerCAmelCase = 0.0 , __lowerCAmelCase = True , __lowerCAmelCase = 0 , __lowerCAmelCase = 1 , __lowerCAmelCase = 2 , **__lowerCAmelCase , ): """simple docstring""" lowerCAmelCase = vocab_size lowerCAmelCase = hidden_size lowerCAmelCase = encoder_ffn_dim lowerCAmelCase = num_encoder_layers lowerCAmelCase = num_encoder_attention_heads lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = num_decoder_layers lowerCAmelCase = num_decoder_attention_heads lowerCAmelCase = max_position_embeddings lowerCAmelCase = init_std # Normal(0, this parameter) lowerCAmelCase = activation_function # parameters for xlmprophetnet lowerCAmelCase = ngram lowerCAmelCase = num_buckets lowerCAmelCase = relative_max_distance lowerCAmelCase = disable_ngram_loss lowerCAmelCase = eps # 3 Types of Dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = dropout lowerCAmelCase = use_cache super().__init__( pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , is_encoder_decoder=__lowerCAmelCase , add_cross_attention=__lowerCAmelCase , decoder_start_token_id=__lowerCAmelCase , **__lowerCAmelCase , ) @property def a_ ( self): """simple docstring""" return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def a_ ( self , __lowerCAmelCase): """simple docstring""" raise NotImplementedError( """This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and""" """ `num_decoder_layers`.""")
370
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # The dataclass is frozen, so write through __dict__ to swap the schema in.
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
370
1
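A sketch of aligning the speech-recognition task template above with a dataset's features (the feature values are illustrative):

features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
template = AutomaticSpeechRecognition().align_with_features(features)
print(template.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}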
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        # Deep-copy each field value, not the config object itself.
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
385
import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Simulate the BB84 protocol and return a `key_len`-bit shared key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
385
1
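A usage sketch for the BB84 routine above, plus the sizing intuition: Alice's and Bob's bases agree independently with probability 1/2, so 6 * key_len prepared qubits yield an expected 3 * key_len sifted bits, comfortably above key_len, which is why the `ljust` padding rarely triggers.

key = bb84(key_len=16, seed=42)
assert len(key) == 16 and set(key) <= {"0", "1"}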
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class a_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): lowercase_ : Union[str, Any] = IFInpaintingPipeline lowercase_ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} lowercase_ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowercase__ ( self : Optional[int] ): return self._get_dummy_components() def lowercase__ ( self : Union[str, Any] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str]=0 ): if str(__lowerCAmelCase ).startswith('mps' ): __snake_case = torch.manual_seed(__lowerCAmelCase ) else: __snake_case = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase ) __snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) __snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase ) __snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def lowercase__ ( self : Optional[Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def lowercase__ ( self : Tuple ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def lowercase__ ( self : Optional[int] ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowercase__ ( self : Any ): self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowercase__ ( self : Tuple ): self._test_save_load_local() def lowercase__ ( self : Any ): self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
356
'''simple docstring''' from .configuration_bert_masked import MaskedBertConfig from .modeling_bert_masked import ( MaskedBertForMultipleChoice, MaskedBertForQuestionAnswering, MaskedBertForSequenceClassification, MaskedBertForTokenClassification, MaskedBertModel, ) from .modules import *
356
1
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : Optional[int] = { 'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class UpperCAmelCase ( lowercase_): """simple docstring""" lowerCAmelCase_ = """umt5""" lowerCAmelCase_ = ["""past_key_values"""] def __init__( self : Any , UpperCamelCase__ : int=25_0112 , UpperCamelCase__ : str=512 , UpperCamelCase__ : Optional[Any]=64 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Tuple=8 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=6 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Any=128 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=1E-6 , UpperCamelCase__ : Dict=1.0 , UpperCamelCase__ : Any="gated-gelu" , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[Any]="T5Tokenizer" , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : str=0 , UpperCamelCase__ : int=1 , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple , ) -> Dict: super().__init__( is_encoder_decoder=UpperCamelCase__ , tokenizer_class=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , ) _UpperCamelCase =vocab_size _UpperCamelCase =d_model _UpperCamelCase =d_kv _UpperCamelCase =d_ff _UpperCamelCase =num_layers _UpperCamelCase =( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _UpperCamelCase =num_heads _UpperCamelCase =relative_attention_num_buckets _UpperCamelCase =relative_attention_max_distance _UpperCamelCase =dropout_rate _UpperCamelCase =layer_norm_epsilon _UpperCamelCase =initializer_factor _UpperCamelCase =feed_forward_proj _UpperCamelCase =use_cache _UpperCamelCase =self.feed_forward_proj.split('''-''' ) _UpperCamelCase =act_info[-1] _UpperCamelCase =act_info[0] == '''gated''' if len(UpperCamelCase__ ) > 1 and act_info[0] != "gated" or len(UpperCamelCase__ ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''' ) if feed_forward_proj == "gated-gelu": _UpperCamelCase ='''gelu_new''' @property def UpperCamelCase__ ( self : Dict ) -> Dict: return self.d_model @property def UpperCamelCase__ ( self : Optional[int] ) -> List[str]: return self.num_heads @property def UpperCamelCase__ ( self : Dict ) -> int: return self.num_layers class UpperCAmelCase ( lowercase_): """simple docstring""" @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def UpperCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: _UpperCamelCase ={ '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: _UpperCamelCase ='''past_encoder_sequence + sequence''' _UpperCamelCase ={0: '''batch'''} _UpperCamelCase ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: _UpperCamelCase ={0: '''batch''', 1: '''decoder_sequence'''} _UpperCamelCase ={0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase__ , direction='''inputs''' ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def UpperCamelCase__ ( self : Tuple ) -> int: return 13 @property def UpperCamelCase__ ( self : Optional[Any] ) -> float: return 5E-4
271
'''simple docstring''' from collections.abc import Generator from math import sin def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" if len(__SCREAMING_SNAKE_CASE ) != 32: raise ValueError('''Input must be of length 32''' ) _UpperCamelCase =b'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) _UpperCamelCase =format(__SCREAMING_SNAKE_CASE , '''08x''' )[-8:] _UpperCamelCase =b'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCamelCase =b'''''' for char in message: bit_string += format(__SCREAMING_SNAKE_CASE , '''08b''' ).encode('''utf-8''' ) _UpperCamelCase =format(len(__SCREAMING_SNAKE_CASE ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__SCREAMING_SNAKE_CASE ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" if len(__SCREAMING_SNAKE_CASE ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(__SCREAMING_SNAKE_CASE ) , 512 ): _UpperCamelCase =bit_string[pos : pos + 512] _UpperCamelCase =[] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) _UpperCamelCase =format(__SCREAMING_SNAKE_CASE , '''032b''' ) _UpperCamelCase ='''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(__SCREAMING_SNAKE_CASE , 2 ) def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" return (a + b) % 2**32 def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCamelCase =preprocess(__SCREAMING_SNAKE_CASE ) _UpperCamelCase =[int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states _UpperCamelCase =0X6745_2301 _UpperCamelCase =0Xefcd_ab89 _UpperCamelCase =0X98ba_dcfe _UpperCamelCase =0X1032_5476 _UpperCamelCase =[ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__SCREAMING_SNAKE_CASE ): _UpperCamelCase =aa _UpperCamelCase =ba _UpperCamelCase =ca _UpperCamelCase =da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _UpperCamelCase =d ^ (b & (c ^ d)) _UpperCamelCase =i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _UpperCamelCase =c ^ (d & (b ^ c)) _UpperCamelCase =(5 * i + 1) % 16 elif i <= 47: _UpperCamelCase =b ^ c ^ d _UpperCamelCase =(3 * i + 5) % 16 else: _UpperCamelCase =c ^ (b | not_aa(__SCREAMING_SNAKE_CASE )) _UpperCamelCase =(7 * i) % 16 _UpperCamelCase =(f + a + added_consts[i] + 
block_words[g]) % 2**32 _UpperCamelCase =d _UpperCamelCase =c _UpperCamelCase =b _UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , left_rotate_aa(__SCREAMING_SNAKE_CASE , shift_amounts[i] ) ) # Add hashed chunk to running total _UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCamelCase =sum_aa(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _UpperCamelCase =reformat_hex(__SCREAMING_SNAKE_CASE ) + reformat_hex(__SCREAMING_SNAKE_CASE ) + reformat_hex(__SCREAMING_SNAKE_CASE ) + reformat_hex(__SCREAMING_SNAKE_CASE ) return digest if __name__ == "__main__": import doctest doctest.testmod()
271
1
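A quick arithmetic check of the 32-bit left-rotate used in the MD5 implementation above (values illustrative): rotating 0x80000000 left by one moves the top bit around to the bottom.

# (0x80000000 << 1) = 0x100000000; (0x80000000 >> 31) = 1; XOR = 0x100000001; mod 2**32 = 1
assert ((0x80000000 << 1) ^ (0x80000000 >> 31)) % 2**32 == 1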
import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ : def __init__( self : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : int=8 , lowerCamelCase_ : int=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=99 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : int=5 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Optional[int]=36 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : int=16 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Any=0.0_2 , lowerCamelCase_ : int=3 , lowerCamelCase_ : int=4 , lowerCamelCase_ : Optional[int]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = num_choices UpperCamelCase = scope def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_input_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : List[str] ): 
"""simple docstring""" UpperCamelCase = self.get_config() UpperCamelCase = 300 return config def lowerCamelCase_ ( self : int ): """simple docstring""" ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = self.prepare_config_and_inputs() UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = MraModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , ): """simple docstring""" UpperCamelCase = True UpperCamelCase = MraModel(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , ) UpperCamelCase = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , ) UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] ): """simple docstring""" UpperCamelCase = MraForMaskedLM(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = MraForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = MraForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Any , lowerCamelCase_ : Dict ): """simple docstring""" UpperCamelCase = self.num_choices UpperCamelCase = MraForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCamelCase = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = () def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = MraModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = MraModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @unittest.skip(reason="""MRA does not output attentions""" ) def lowerCamelCase_ ( self : Any ): """simple docstring""" return @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) UpperCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(lowerCamelCase_ )[0] UpperCamelCase = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor( [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) UpperCamelCase = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(lowerCamelCase_ )[0] UpperCamelCase = 5_0265 UpperCamelCase = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor( [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) UpperCamelCase = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): UpperCamelCase = model(lowerCamelCase_ )[0] UpperCamelCase = 5_0265 UpperCamelCase = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor( [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, 
-1.8_1_0_6, 7.0_3_8_0]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
537
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ): @register_to_config def __init__( self : List[Any] , lowerCamelCase_ : int = 128 , lowerCamelCase_ : int = 256 , lowerCamelCase_ : float = 2_0_0_0.0 , lowerCamelCase_ : int = 768 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 12 , lowerCamelCase_ : int = 64 , lowerCamelCase_ : int = 2048 , lowerCamelCase_ : float = 0.1 , ): """simple docstring""" super().__init__() UpperCamelCase = nn.Sequential( nn.Linear(lowerCamelCase_ , d_model * 4 , bias=lowerCamelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCamelCase_ ) , nn.SiLU() , ) UpperCamelCase = nn.Embedding(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = False UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ ) UpperCamelCase = nn.Dropout(p=lowerCamelCase_ ) UpperCamelCase = nn.ModuleList() for lyr_num in range(lowerCamelCase_ ): # FiLM conditional T5 decoder UpperCamelCase = DecoderLayer(d_model=lowerCamelCase_ , d_kv=lowerCamelCase_ , num_heads=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ ) self.decoders.append(lowerCamelCase_ ) UpperCamelCase = TaLayerNorm(lowerCamelCase_ ) UpperCamelCase = nn.Dropout(p=lowerCamelCase_ ) UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase , UpperCamelCase = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. UpperCamelCase = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) UpperCamelCase = self.conditioning_emb(lowerCamelCase_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) UpperCamelCase = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. UpperCamelCase = torch.broadcast_to( torch.arange(lowerCamelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) UpperCamelCase = self.position_encoding(lowerCamelCase_ ) UpperCamelCase = self.continuous_inputs_projection(lowerCamelCase_ ) inputs += position_encodings UpperCamelCase = self.dropout(lowerCamelCase_ ) # decoder: No padding present. UpperCamelCase = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
UpperCamelCase = [(x, self.encoder_decoder_mask(lowerCamelCase_ , lowerCamelCase_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings UpperCamelCase = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) UpperCamelCase = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: UpperCamelCase = lyr( lowerCamelCase_ , conditioning_emb=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , encoder_attention_mask=lowerCamelCase_ , )[0] UpperCamelCase = self.decoder_norm(lowerCamelCase_ ) UpperCamelCase = self.post_dropout(lowerCamelCase_ ) UpperCamelCase = self.spec_out(lowerCamelCase_ ) return spec_out class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any=1E-6 ): """simple docstring""" super().__init__() UpperCamelCase = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowerCamelCase_ , d_kv=lowerCamelCase_ , num_heads=lowerCamelCase_ , dropout_rate=lowerCamelCase_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowerCamelCase_ , d_kv=lowerCamelCase_ , num_heads=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ , layer_norm_epsilon=lowerCamelCase_ ) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : Union[str, Any]=None , ): """simple docstring""" UpperCamelCase = self.layer[0]( lowerCamelCase_ , conditioning_emb=lowerCamelCase_ , attention_mask=lowerCamelCase_ , ) if encoder_hidden_states is not None: UpperCamelCase = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) UpperCamelCase = self.layer[1]( lowerCamelCase_ , key_value_states=lowerCamelCase_ , attention_mask=lowerCamelCase_ , ) # Apply Film Conditional Feed Forward layer UpperCamelCase = self.layer[-1](lowerCamelCase_ , lowerCamelCase_ ) return (hidden_states,) class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] ): """simple docstring""" super().__init__() UpperCamelCase = TaLayerNorm(lowerCamelCase_ ) UpperCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase_ ) UpperCamelCase = Attention(query_dim=lowerCamelCase_ , heads=lowerCamelCase_ , dim_head=lowerCamelCase_ , out_bias=lowerCamelCase_ , scale_qk=lowerCamelCase_ ) UpperCamelCase = nn.Dropout(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple=None , lowerCamelCase_ : Optional[Any]=None , ): """simple docstring""" UpperCamelCase = self.layer_norm(lowerCamelCase_ ) if conditioning_emb is not None: UpperCamelCase = self.FiLMLayer(lowerCamelCase_ , lowerCamelCase_ ) # Self-attention block UpperCamelCase = self.attention(lowerCamelCase_ ) UpperCamelCase = hidden_states + self.dropout(lowerCamelCase_ ) return hidden_states class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] , 
lowerCamelCase_ : str , lowerCamelCase_ : Dict ): """simple docstring""" super().__init__() UpperCamelCase = Attention(query_dim=lowerCamelCase_ , heads=lowerCamelCase_ , dim_head=lowerCamelCase_ , out_bias=lowerCamelCase_ , scale_qk=lowerCamelCase_ ) UpperCamelCase = TaLayerNorm(lowerCamelCase_ , eps=lowerCamelCase_ ) UpperCamelCase = nn.Dropout(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict=None , lowerCamelCase_ : int=None , ): """simple docstring""" UpperCamelCase = self.layer_norm(lowerCamelCase_ ) UpperCamelCase = self.attention( lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , attention_mask=attention_mask.squeeze(1 ) , ) UpperCamelCase = hidden_states + self.dropout(lowerCamelCase_ ) return layer_output class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : Union[str, Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ): """simple docstring""" super().__init__() UpperCamelCase = TaDenseGatedActDense(d_model=lowerCamelCase_ , d_ff=lowerCamelCase_ , dropout_rate=lowerCamelCase_ ) UpperCamelCase = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCamelCase_ ) UpperCamelCase = TaLayerNorm(lowerCamelCase_ , eps=lowerCamelCase_ ) UpperCamelCase = nn.Dropout(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int=None ): """simple docstring""" UpperCamelCase = self.layer_norm(lowerCamelCase_ ) if conditioning_emb is not None: UpperCamelCase = self.film(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = self.DenseReluDense(lowerCamelCase_ ) UpperCamelCase = hidden_states + self.dropout(lowerCamelCase_ ) return hidden_states class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ): """simple docstring""" super().__init__() UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ ) UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ ) UpperCamelCase = nn.Linear(lowerCamelCase_ , lowerCamelCase_ , bias=lowerCamelCase_ ) UpperCamelCase = nn.Dropout(lowerCamelCase_ ) UpperCamelCase = NewGELUActivation() def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[int] ): """simple docstring""" UpperCamelCase = self.act(self.wi_a(lowerCamelCase_ ) ) UpperCamelCase = self.wi_a(lowerCamelCase_ ) UpperCamelCase = hidden_gelu * hidden_linear UpperCamelCase = self.dropout(lowerCamelCase_ ) UpperCamelCase = self.wo(lowerCamelCase_ ) return hidden_states class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : Optional[int] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict=1E-6 ): """simple docstring""" super().__init__() UpperCamelCase = nn.Parameter(torch.ones(lowerCamelCase_ ) ) UpperCamelCase = eps def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=lowerCamelCase_ ) UpperCamelCase = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: UpperCamelCase = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class SCREAMING_SNAKE_CASE_ ( nn.Module ): def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : torch.Tensor ): """simple docstring""" return 0.5 * input * (1.0 + 
torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(lowerCamelCase_ , 3.0 )) )) class SCREAMING_SNAKE_CASE_ ( nn.Module ): def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): """simple docstring""" super().__init__() UpperCamelCase = nn.Linear(lowerCamelCase_ , out_features * 2 , bias=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): """simple docstring""" UpperCamelCase = self.scale_bias(lowerCamelCase_ ) UpperCamelCase , UpperCamelCase = torch.chunk(lowerCamelCase_ , 2 , -1 ) UpperCamelCase = x * (1 + scale) + shift return x
537
1
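The integration tests in the row above all follow one pattern: pin the full output shape, then compare a small corner slice against hard-coded reference values with an absolute tolerance. A minimal self-contained sketch of that pattern, with placeholder tensors rather than real MRA outputs:

import torch

# Stand-ins for a model output and its hard-coded reference slice.
output = torch.tensor([[[-0.0140, 0.0830, -0.0381],
                        [0.1546, 0.1402, 0.0220],
                        [0.1162, 0.0851, 0.0165]]])
expected_slice = output.clone()

# Pin the shape first, then compare elementwise with a tolerance,
# which is robust to small kernel-level nondeterminism.
assert output.shape == torch.Size((1, 3, 3))
assert torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)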
'''simple docstring''' from __future__ import annotations lowerCAmelCase : Tuple = tuple[int, int, int] lowerCAmelCase : Optional[Any] = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase lowerCAmelCase : List[str] = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- lowerCAmelCase : str = """EGZWVONAHDCLFQMSIPJBYUKXTR""" lowerCAmelCase : List[Any] = """FOBHMDKEXQNRAULPGSJVTYICZW""" lowerCAmelCase : Tuple = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- lowerCAmelCase : int = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- lowerCAmelCase : Optional[Any] = """RMDJXFUWGISLHVTCQNKYPBEZOA""" lowerCAmelCase : str = """SGLCPQWZHKXAREONTFBVIYJUDM""" lowerCAmelCase : List[Any] = """HVSICLTYKQUBXDWAJZOMFGPREN""" lowerCAmelCase : List[str] = """RZWQHFMVDBKICJLNTUXAGYPSOE""" lowerCAmelCase : int = """LFKIJODBEGAMQPXVUHYSTCZRWN""" lowerCAmelCase : Optional[int] = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def lowercase (_A , _A , _A ): """simple docstring""" if (unique_rotsel := len(set(_A ) )) < 3: _lowerCAmelCase : Tuple = f'Please use 3 unique rotors (not {unique_rotsel})' raise Exception(_A ) # Checks if rotor positions are valid _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = rotpos if not 0 < rotorposa <= len(_A ): _lowerCAmelCase : Optional[int] = f'First rotor position is not within range of 1..26 ({rotorposa}' raise ValueError(_A ) if not 0 < rotorposa <= len(_A ): _lowerCAmelCase : Tuple = f'Second rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(_A ) if not 0 < rotorposa <= len(_A ): _lowerCAmelCase : Optional[Any] = f'Third rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(_A ) # Validates string and returns dict _lowerCAmelCase : str = _plugboard(_A ) return rotpos, rotsel, pbdict def lowercase (_A ): """simple docstring""" if not isinstance(_A , _A ): _lowerCAmelCase : Dict = f'Plugboard setting isn\'t type string ({type(_A )})' raise TypeError(_A ) elif len(_A ) % 2 != 0: _lowerCAmelCase : Dict = f'Odd number of symbols ({len(_A )})' raise Exception(_A ) elif pbstring == "": return {} pbstring.replace(' ' , '' ) # Checks if all characters are unique _lowerCAmelCase : Any = set() for i in pbstring: if i not in abc: _lowerCAmelCase : Any = f'\'{i}\' not in list of symbols' raise Exception(_A ) elif i in tmppbl: _lowerCAmelCase : int = f'Duplicate symbol ({i})' raise Exception(_A ) else: tmppbl.add(_A ) del tmppbl # Created the dictionary _lowerCAmelCase : List[Any] = {} for j in range(0 , len(_A ) - 1 , 2 ): _lowerCAmelCase : Optional[int] = pbstring[j + 1] _lowerCAmelCase : Optional[Any] = pbstring[j] return pb def lowercase (_A , _A , _A = (rotora, rotora, rotora) , _A = "" , ): """simple docstring""" _lowerCAmelCase : Tuple = text.upper() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = _validator( _A , _A , plugb.upper() ) _lowerCAmelCase , _lowerCAmelCase , 
_lowerCAmelCase : Any = rotor_position _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 _lowerCAmelCase : Optional[Any] = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: _lowerCAmelCase : int = plugboard[symbol] # rotor ra -------------------------- _lowerCAmelCase : Optional[int] = abc.index(_A ) + rotorposa _lowerCAmelCase : Dict = rotora[index % len(_A )] # rotor rb -------------------------- _lowerCAmelCase : Tuple = abc.index(_A ) + rotorposa _lowerCAmelCase : Union[str, Any] = rotora[index % len(_A )] # rotor rc -------------------------- _lowerCAmelCase : List[str] = abc.index(_A ) + rotorposa _lowerCAmelCase : List[Any] = rotora[index % len(_A )] # reflector -------------------------- # this is the reason you don't need another machine to decipher _lowerCAmelCase : Optional[Any] = reflector[symbol] # 2nd rotors _lowerCAmelCase : Any = abc[rotora.index(_A ) - rotorposa] _lowerCAmelCase : int = abc[rotora.index(_A ) - rotorposa] _lowerCAmelCase : Union[str, Any] = abc[rotora.index(_A ) - rotorposa] # 2nd plugboard if symbol in plugboard: _lowerCAmelCase : int = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_A ): _lowerCAmelCase : Tuple = 0 rotorposa += 1 if rotorposa >= len(_A ): _lowerCAmelCase : Optional[Any] = 0 rotorposa += 1 if rotorposa >= len(_A ): _lowerCAmelCase : Dict = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_A ) return "".join(_A ) if __name__ == "__main__": lowerCAmelCase : Tuple = """This is my Python script that emulates the Enigma machine from WWII.""" lowerCAmelCase : Optional[Any] = (1, 1, 1) lowerCAmelCase : Optional[Any] = """pictures""" lowerCAmelCase : Dict = (rotora, rotora, rotora) lowerCAmelCase : Optional[int] = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
630
'''simple docstring''' import os import re import unicodedata from shutil import copyfile from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import is_torch_available, logging if is_torch_available(): import torch if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation lowerCAmelCase : int = logging.get_logger(__name__) lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """spiece.model"""} lowerCAmelCase : Optional[int] = { """vocab_file""": { """AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""", """AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""", } } lowerCAmelCase : Union[str, Any] = { """AI-Sweden/gpt-sw3-126m""": 20_48, """AI-Sweden/gpt-sw3-350m""": 20_48, """AI-Sweden/gpt-sw3-1.6b""": 20_48, """AI-Sweden/gpt-sw3-6.7b""": 20_48, """AI-Sweden/gpt-sw3-20b""": 20_48, } class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ["input_ids", "attention_mask"] def __init__( self , snake_case__ , snake_case__=False , snake_case__=False , snake_case__=False , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__ = None , **snake_case__ , ): '''simple docstring''' _lowerCAmelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCAmelCase : List[Any] = kwargs.get('name_or_path' ) if name_or_path is None: logger.warning( 'name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,' ' you are testing the model, this can safely be ignored' ) _lowerCAmelCase : Any = 'None' # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing _lowerCAmelCase : str = '<|endoftext|>' if eos_token is None else eos_token _lowerCAmelCase : Tuple = '<unk>' if unk_token is None else unk_token if "gpt-sw3-7b" in name_or_path: _lowerCAmelCase : List[str] = unk_token if pad_token is None else pad_token _lowerCAmelCase : Optional[int] = eos_token if bos_token is None else bos_token else: _lowerCAmelCase : Tuple = '<pad>' if pad_token is None else pad_token _lowerCAmelCase : Union[str, Any] = '<s>' if bos_token is None else bos_token super().__init__( do_lower_case=snake_case__ , remove_space=snake_case__ , keep_accents=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) _lowerCAmelCase : Union[str, Any] = do_lower_case _lowerCAmelCase : Optional[int] = remove_space _lowerCAmelCase : Any = keep_accents _lowerCAmelCase : Optional[int] = vocab_file _lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) # Used for whitespace normalization in input texts # fmt : off _lowerCAmelCase : Optional[Any] = {' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '', '„'} # fmt : on # Regular expression to remove 
non-printing characters (e.g. some unicode control chars) in preprocessing _lowerCAmelCase : Optional[Any] = re.compile( F'[{"".join(map(snake_case__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]' ) def __getstate__( self ): '''simple docstring''' _lowerCAmelCase : List[str] = self.__dict__.copy() _lowerCAmelCase : int = None return state def __setstate__( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _lowerCAmelCase : int = {} _lowerCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) @property # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size def a ( self ): '''simple docstring''' return len(self.sp_model ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = self.non_printing_characters_re.sub('' , snake_case__ ) # Normalize whitespaces _lowerCAmelCase : Tuple = ''.join([char if char not in self.whitespaces else ' ' for char in text] ) # NFC Unicode normalization _lowerCAmelCase : Union[str, Any] = unicodedata.normalize('NFC' , snake_case__ ) return text def a ( self , snake_case__ , **snake_case__ ): '''simple docstring''' _lowerCAmelCase : str = self.preprocess_text(snake_case__ ) return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.PieceToId(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.IdToPiece(snake_case__ ) @staticmethod def a ( snake_case__ ): '''simple docstring''' return out_string def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : int = [] _lowerCAmelCase : Optional[Any] = '' _lowerCAmelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document if not prev_is_special: out_string += " " out_string += self.sp_model.decode(snake_case__ ) + token _lowerCAmelCase : Union[str, Any] = True _lowerCAmelCase : List[Any] = [] else: current_sub_tokens.append(snake_case__ ) _lowerCAmelCase : List[Any] = False out_string += self.sp_model.decode(snake_case__ ) return out_string def a ( self ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def a ( self , snake_case__ , snake_case__ = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return _lowerCAmelCase : int = os.path.join( snake_case__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , 'wb' ) as fi: _lowerCAmelCase : Any = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,) def a ( self , snake_case__ , snake_case__ = False ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): _lowerCAmelCase : Optional[Any] = self.preprocess_text(snake_case__ ) 
_lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) else: _lowerCAmelCase : Tuple = [self.preprocess_text(snake_case__ ) for t in text] _lowerCAmelCase : List[str] = self.sp_model.encode(snake_case__ ) if return_tensors is True or return_tensors == "pt": _lowerCAmelCase : int = torch.tensor(snake_case__ ) return token_ids def a ( self , snake_case__ ): '''simple docstring''' return self.sp_model.decode(snake_case__ ) def a ( self , snake_case__ ): '''simple docstring''' _lowerCAmelCase : Optional[int] = [F'User: {text}' if is_user else F'Bot: {text}' for is_user, text in conversation.iter_texts()] _lowerCAmelCase : str = ( F'{self.eos_token}{self.bos_token}' + F'{self.bos_token}'.join(snake_case__ ) + F'{self.bos_token}Bot:' ) return self.encode(text=snake_case__ )
630
1
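In the Enigma row above, the renaming pass collapsed the three distinct rotor-position variables into a single `rotorposa`, so the stepping logic no longer reads correctly. A minimal runnable sketch of the per-symbol substitution one rotor performs, using the first rotor constant from that row; `offset` is an illustrative stand-in for the rotor position:

abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
rotor = "EGZWVONAHDCLFQMSIPJBYUKXTR"

def rotor_forward(symbol: str, offset: int) -> str:
    # Into the machine: shift by the rotor position, then substitute.
    return rotor[(abc.index(symbol) + offset) % len(abc)]

def rotor_backward(symbol: str, offset: int) -> str:
    # Back out after the reflector: invert the substitution, then unshift.
    return abc[(rotor.index(symbol) - offset) % len(abc)]

# The backward pass undoes the forward pass at the same rotor position.
assert rotor_backward(rotor_forward("A", 3), 3) == "A"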
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit` using an odd-only Sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        # Mark odd multiples of i as composite; even indices are never read.
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: the prime below `ceiling` expressible as the sum of the
    most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        # Start at i + length: shorter runs cannot beat the best found so far.
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(F'{solution() = }')
651
'''simple docstring''' A_ = frozenset( [ "prompt", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) A_ = frozenset(["prompt", "negative_prompt"]) A_ = frozenset([]) A_ = frozenset(["image"]) A_ = frozenset( [ "image", "height", "width", "guidance_scale", ] ) A_ = frozenset(["image"]) A_ = frozenset( [ "prompt", "image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) A_ = frozenset(["prompt", "image", "negative_prompt"]) A_ = frozenset( [ # Text guided image variation with an image mask "prompt", "image", "mask_image", "height", "width", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", ] ) A_ = frozenset(["prompt", "image", "mask_image", "negative_prompt"]) A_ = frozenset( [ # image variation with an image mask "image", "mask_image", "height", "width", "guidance_scale", ] ) A_ = frozenset(["image", "mask_image"]) A_ = frozenset( [ "example_image", "image", "mask_image", "height", "width", "guidance_scale", ] ) A_ = frozenset(["example_image", "image", "mask_image"]) A_ = frozenset(["class_labels"]) A_ = frozenset(["class_labels"]) A_ = frozenset(["batch_size"]) A_ = frozenset([]) A_ = frozenset(["batch_size"]) A_ = frozenset([]) A_ = frozenset( [ "prompt", "audio_length_in_s", "guidance_scale", "negative_prompt", "prompt_embeds", "negative_prompt_embeds", "cross_attention_kwargs", ] ) A_ = frozenset(["prompt", "negative_prompt"]) A_ = frozenset(["input_tokens"]) A_ = frozenset(["input_tokens"])
143
0
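Short usage checks for the sieve and search as reconstructed above (the names `prime_sieve` and `solution` are the ones restored there):

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# Below 100, the longest run of consecutive primes summing to a prime is
# 2 + 3 + 5 + 7 + 11 + 13 = 41 (six terms).
assert solution(100) == 41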
"""simple docstring""" from __future__ import annotations import string from itertools import cycle, product from pathlib import Path SCREAMING_SNAKE_CASE_ : str = ( string.ascii_letters + string.digits + string.punctuation + string.whitespace ) SCREAMING_SNAKE_CASE_ : list[int] = [ord(letter) for letter in string.ascii_lowercase] SCREAMING_SNAKE_CASE_ : set[int] = {ord(char) for char in VALID_CHARS} SCREAMING_SNAKE_CASE_ : list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"] def _snake_case ( UpperCAmelCase_ : list[int] , UpperCAmelCase_ : tuple[int, ...] ): A__ = """""" A__ = 42 A__ = 42 A__ = 42 for keychar, cipherchar in zip(cycle(UpperCAmelCase_ ) , UpperCAmelCase_ ): A__ = cipherchar ^ keychar if decodedchar not in VALID_INTS: return None decoded += chr(UpperCAmelCase_ ) return decoded def _snake_case ( UpperCAmelCase_ : list[int] ): A__ = [] for key in product(UpperCAmelCase_ , repeat=3 ): A__ = try_key(UpperCAmelCase_ , UpperCAmelCase_ ) if encoded is not None: possibles.append(UpperCAmelCase_ ) return possibles def _snake_case ( UpperCAmelCase_ : list[str] , UpperCAmelCase_ : str ): return [possible for possible in possibles if common_word in possible.lower()] def _snake_case ( UpperCAmelCase_ : str = "p059_cipher.txt" ): A__ = 42 A__ = 42 A__ = 42 A__ = 42 A__ = Path(UpperCAmelCase_ ).parent.joinpath(UpperCAmelCase_ ).read_text(encoding="""utf-8""" ) A__ = [int(UpperCAmelCase_ ) for number in data.strip().split(""",""" )] A__ = filter_valid_chars(UpperCAmelCase_ ) for common_word in COMMON_WORDS: A__ = filter_common_word(UpperCAmelCase_ , UpperCAmelCase_ ) if len(UpperCAmelCase_ ) == 1: break A__ = possibles[0] return sum(ord(UpperCAmelCase_ ) for char in decoded_text ) if __name__ == "__main__": print(f"""{solution() = }""")
713
"""simple docstring""" from __future__ import annotations class a : """simple docstring""" def __init__( self: Any , UpperCamelCase: str , UpperCamelCase: str ): """simple docstring""" A__ , A__ = text, pattern A__ , A__ = len(UpperCamelCase ), len(UpperCamelCase ) def UpperCamelCase ( self: Dict , UpperCamelCase: str ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def UpperCamelCase ( self: str , UpperCamelCase: int ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ = [] for i in range(self.textLen - self.patLen + 1 ): A__ = self.mismatch_in_text(UpperCamelCase ) if mismatch_index == -1: positions.append(UpperCamelCase ) else: A__ = self.match_in_pattern(self.text[mismatch_index] ) A__ = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions SCREAMING_SNAKE_CASE_ : List[Any] = 'ABAABA' SCREAMING_SNAKE_CASE_ : List[Any] = 'AB' SCREAMING_SNAKE_CASE_ : Union[str, Any] = BoyerMooreSearch(text, pattern) SCREAMING_SNAKE_CASE_ : int = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
500
0
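The Euler-59 row above brute-forces a three-letter repeating XOR key. The core property it relies on, in a compact standalone form (`xor_cipher` is an illustrative helper, not a name from the row): XOR with the same key is its own inverse.

from itertools import cycle

def xor_cipher(data: bytes, key: bytes) -> bytes:
    # Repeating-key XOR: cycle the key over the data byte by byte.
    return bytes(b ^ k for b, k in zip(data, cycle(key)))

message = b"the quick brown fox"
key = b"god"
assert xor_cipher(xor_cipher(message, key), key) == message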
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
394
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """Probe the list from both ends at once; return the index of `key`,
    or -1 if it is absent. `right` defaults to the last index."""
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
394
1
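Usage checks for the two-ended recursive `search` restored above; since each call probes both `left` and `right`, the recursion depth is at most half the list length:

assert search([1, 3, 5, 7, 9], 9) == 4   # hit on the right probe
assert search([1, 3, 5, 7, 9], 5) == 2   # found when the probes meet
assert search([1, 3, 5, 7, 9], 4) == -1  # absent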
"""simple docstring""" from typing import List import datasets from datasets.tasks import AudioClassification from ..folder_based_builder import folder_based_builder UpperCamelCase_ = datasets.utils.logging.get_logger(__name__) class snake_case ( folder_based_builder.FolderBasedBuilderConfig ): a_ : Optional[int] = None a_ : int = None class snake_case ( folder_based_builder.FolderBasedBuilder ): a_ : int = datasets.Audio() a_ : Tuple = """audio""" a_ : Optional[Any] = AudioFolderConfig a_ : Dict = 42 # definition at the bottom of the script a_ : Tuple = AudioClassification(audio_column="""audio""" , label_column="""label""" ) UpperCamelCase_ = [ ".aiff", ".au", ".avr", ".caf", ".flac", ".htk", ".svx", ".mat4", ".mat5", ".mpc2k", ".ogg", ".paf", ".pvf", ".raw", ".rf64", ".sd2", ".sds", ".ircam", ".voc", ".w64", ".wav", ".nist", ".wavex", ".wve", ".xi", ".mp3", ".opus", ] UpperCamelCase_ = AUDIO_EXTENSIONS
713
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class snake_case ( unittest.TestCase ): def UpperCAmelCase__ ( self) ->Optional[Any]: a_ = "hf-internal-testing/tiny-random-t5" a_ = AutoTokenizer.from_pretrained(__UpperCAmelCase) a_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase) a_ = tokenizer("This is me" , return_tensors="pt") a_ = model.to_bettertransformer() self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) a_ = model.generate(**__UpperCAmelCase) a_ = model.reverse_bettertransformer() self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCAmelCase) a_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase) self.assertFalse( any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())) a_ = model_reloaded.generate(**__UpperCAmelCase) self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase)) def UpperCAmelCase__ ( self) ->List[Any]: a_ = "hf-internal-testing/tiny-random-t5" a_ = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase) a_ = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(__UpperCAmelCase): model.save_pretrained(__UpperCAmelCase) a_ = model.reverse_bettertransformer() model.save_pretrained(__UpperCAmelCase)
210
0
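The second blob in the row above round-trips a model through BetterTransformer (its `AutoModelForSeqaSeqLM` is the renamed form of `AutoModelForSeq2SeqLM`). A minimal sketch of the same flow outside unittest; it assumes the `optimum` package is installed, and reuses the tiny test checkpoint from the row:

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("This is me", return_tensors="pt")
model = model.to_bettertransformer()        # swap in fused-attention modules
outputs = model.generate(**inputs)
model = model.reverse_bettertransformer()   # restore vanilla modules before saving
model.save_pretrained("tiny-t5-roundtrip")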
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin UpperCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") UpperCamelCase = {"""target_lang""": """fi""", """source_lang""": """en"""} UpperCamelCase = """>>zh<<""" UpperCamelCase = """Helsinki-NLP/""" if is_torch_available(): UpperCamelCase = """pt""" elif is_tf_available(): UpperCamelCase = """tf""" else: UpperCamelCase = """jax""" @require_sentencepiece class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ): """simple docstring""" snake_case = MarianTokenizer snake_case = False snake_case = True def _snake_case ( self )->str: '''simple docstring''' super().setUp() A_ : Union[str, Any] = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] A_ : List[Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) ) A_ : int = Path(self.tmpdirname ) save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''vocab'''] ) save_json(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''tokenizer_config_file'''] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''source_spm'''] ) copyfile(_SCREAMING_SNAKE_CASE , save_dir / VOCAB_FILES_NAMES['''target_spm'''] ) A_ : Optional[int] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->MarianTokenizer: '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ) def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[int]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _snake_case ( self )->List[Any]: '''simple docstring''' A_ : str = '''</s>''' A_ : Tuple = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Tuple: '''simple docstring''' A_ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''</s>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 9 ) def _snake_case ( self )->Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def _snake_case ( self )->Union[str, Any]: '''simple docstring''' A_ : List[str] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' ) A_ : Union[str, Any] = en_de_tokenizer(['''I am a small frog'''] , return_tensors=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(_SCREAMING_SNAKE_CASE , batch.input_ids[0] ) A_ : str = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) A_ : List[Any] = [x.name for x in Path(_SCREAMING_SNAKE_CASE ).glob('''*''' )] self.assertIn('''source.spm''' , 
_SCREAMING_SNAKE_CASE ) MarianTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) def _snake_case ( self )->Union[str, Any]: '''simple docstring''' A_ : Union[str, Any] = self.get_tokenizer() A_ : Any = tok( ['''I am a small frog''' * 1000, '''I am a small frog'''] , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def _snake_case ( self )->Dict: '''simple docstring''' A_ : Union[str, Any] = self.get_tokenizer() A_ : int = tok(['''I am a tiny frog''', '''I am a small frog'''] , padding=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def _snake_case ( self )->int: '''simple docstring''' A_ : int = {'''input_ids''': [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name='''Helsinki-NLP/opus-mt-en-de''' , revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''' , decode_kwargs={'''use_source_tokenizer''': True} , ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' A_ : List[str] = MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' ) A_ : str = '''Tämä on testi''' A_ : Tuple = '''This is a test''' A_ : Dict = [76, 7, 2047, 2] A_ : List[str] = [69, 12, 11, 940, 2] A_ : Dict = tokenizer(_SCREAMING_SNAKE_CASE ).input_ids self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = tokenizer(text_target=_SCREAMING_SNAKE_CASE ).input_ids self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) A_ : Any = tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
590
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCamelCase ( UpperCamelCase , unittest.TestCase ): """simple docstring""" snake_case = KandinskyVaaImgaImgPipeline snake_case = ["image_embeds", "negative_image_embeds", "image"] snake_case = [ "image_embeds", "negative_image_embeds", "image", ] snake_case = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] snake_case = False @property def _snake_case ( self )->Optional[int]: '''simple docstring''' return 32 @property def _snake_case ( self )->Optional[Any]: '''simple docstring''' return 32 @property def _snake_case ( self )->List[Any]: '''simple docstring''' return self.time_input_dim @property def _snake_case ( self )->str: '''simple docstring''' return self.time_input_dim * 4 @property def _snake_case ( self )->Union[str, Any]: '''simple docstring''' return 100 @property def _snake_case ( self )->Optional[int]: '''simple docstring''' torch.manual_seed(0 ) A_ : Optional[int] = { '''in_channels''': 4, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } A_ : Tuple = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE ) return model @property def _snake_case ( self )->int: '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _snake_case ( self )->Tuple: '''simple docstring''' torch.manual_seed(0 ) A_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _snake_case ( self )->Optional[int]: '''simple docstring''' A_ : Dict = self.dummy_unet A_ : Optional[int] = self.dummy_movq A_ : Dict = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.0_0_0_8_5, '''beta_end''': 0.0_1_2, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } A_ : List[Any] = DDIMScheduler(**_SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def _snake_case ( self , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )->List[str]: '''simple docstring''' A_ : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) A_ : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _SCREAMING_SNAKE_CASE ) # create init_image A_ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) A_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Optional[int] = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((256, 256) ) if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ): A_ : Optional[int] = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: A_ : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) A_ : Dict = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def _snake_case ( self )->Optional[int]: '''simple docstring''' A_ : Any = '''cpu''' A_ : List[str] = self.get_dummy_components() A_ : Any = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) A_ : int = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) A_ : List[str] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) A_ : Optional[Any] = output.images A_ : Union[str, Any] = pipe( **self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0] A_ : Any = image[0, -3:, -3:, -1] A_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A_ : Any = np.array( [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self )->Dict: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self )->Dict: '''simple docstring''' A_ : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' ) A_ : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) A_ : Dict = '''A red cartoon frog, 4k''' A_ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_SCREAMING_SNAKE_CASE ) A_ : int = KandinskyVaaImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa ) A_ : int = pipeline.to(_SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) A_ : Optional[int] = torch.Generator(device='''cpu''' ).manual_seed(0 ) A_ , A_ : Optional[Any] = pipe_prior( _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , 
negative_prompt='''''' , ).to_tuple() A_ : List[Any] = pipeline( image=_SCREAMING_SNAKE_CASE , image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , ) A_ : Union[str, Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
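# NOTE: hedged end-to-end sketch of the same prior + img2img flow using the
# upstream diffusers class names (the identifiers in the tests above are mangled
# variants of these); init_image is a placeholder PIL image you supply yourself.
import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

# The prior maps text to CLIP image embeddings; the decoder conditions on them.
image_embeds, negative_embeds = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
frog = decoder(
    image=init_image,  # placeholder: any PIL.Image, e.g. the cat photo from the slow test
    image_embeds=image_embeds,
    negative_image_embeds=negative_embeds,
    strength=0.2,
).images[0]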
590
1
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' lowerCamelCase : int = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""] @register_to_config def __init__( self : Any , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : int = 50_257 , _UpperCamelCase : int = 1_024 , _UpperCamelCase : int = 768 , _UpperCamelCase : int = 12 , _UpperCamelCase : int = 12 , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : str = "gelu_new" , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 0.1 , _UpperCamelCase : float = 1e-5 , _UpperCamelCase : float = 0.0_2 , _UpperCamelCase : bool = True , _UpperCamelCase : bool = True , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , ): super().__init__() _lowercase: List[Any] = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and" f" `n_embd`: {n_embd} are not equal.") _lowercase: List[str] = prefix_inner_dim _lowercase: List[Any] = prefix_hidden_dim _lowercase: str = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim) if self.prefix_hidden_dim is not None else nn.Identity() ) _lowercase: Tuple = ( nn.Linear(self.prefix_hidden_dim , _UpperCamelCase) if self.prefix_hidden_dim is not None else nn.Identity() ) _lowercase: Tuple = GPTaConfig( vocab_size=_UpperCamelCase , n_positions=_UpperCamelCase , n_embd=_UpperCamelCase , n_layer=_UpperCamelCase , n_head=_UpperCamelCase , n_inner=_UpperCamelCase , activation_function=_UpperCamelCase , resid_pdrop=_UpperCamelCase , embd_pdrop=_UpperCamelCase , attn_pdrop=_UpperCamelCase , layer_norm_epsilon=_UpperCamelCase , initializer_range=_UpperCamelCase , scale_attn_weights=_UpperCamelCase , use_cache=_UpperCamelCase , scale_attn_by_inverse_layer_idx=_UpperCamelCase , reorder_and_upcast_attn=_UpperCamelCase , ) _lowercase: List[Any] = GPTaLMHeadModel(_UpperCamelCase) def UpperCAmelCase__ ( self : int , _UpperCamelCase : torch.Tensor , _UpperCamelCase : torch.Tensor , _UpperCamelCase : Optional[torch.Tensor] = None , _UpperCamelCase : Optional[torch.Tensor] = None , ): _lowercase: Optional[Any] = self.transformer.transformer.wte(_UpperCamelCase) _lowercase: Tuple = self.encode_prefix(_UpperCamelCase) _lowercase: Tuple = self.decode_prefix(_UpperCamelCase) _lowercase: int = torch.cat((prefix_embeds, embedding_text) , dim=1) if labels is not None: _lowercase: Optional[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device) _lowercase: Dict = torch.cat((dummy_token, input_ids) , dim=1) _lowercase: Dict = self.transformer(inputs_embeds=_UpperCamelCase , labels=_UpperCamelCase , attention_mask=_UpperCamelCase) if self.prefix_hidden_dim is not None: return out, hidden else: return out def UpperCAmelCase__ ( self : List[Any] , _UpperCamelCase : int , _UpperCamelCase : torch.device): return torch.zeros(_UpperCamelCase , self.prefix_length , dtype=torch.intaa , device=_UpperCamelCase) def UpperCAmelCase__ ( self : List[str] , _UpperCamelCase : List[Any]): return self.encode_prefix(_UpperCamelCase) @torch.no_grad() def UpperCAmelCase__ ( self : 
Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : str): _lowercase: int = torch.split(_UpperCamelCase , 1 , dim=0) _lowercase: Dict = [] _lowercase: Dict = [] for feature in features: _lowercase: str = self.decode_prefix(feature.to(_UpperCamelCase)) # back to the clip feature # Only support beam search for now _lowercase: str = self.generate_beam( input_embeds=_UpperCamelCase , device=_UpperCamelCase , eos_token_id=_UpperCamelCase) generated_tokens.append(output_tokens[0]) generated_seq_lengths.append(seq_lengths[0]) _lowercase: Any = torch.stack(_UpperCamelCase) _lowercase: str = torch.stack(_UpperCamelCase) return generated_tokens, generated_seq_lengths @torch.no_grad() def UpperCAmelCase__ ( self : Dict , _UpperCamelCase : List[str]=None , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : int = 5 , _UpperCamelCase : int = 67 , _UpperCamelCase : float = 1.0 , _UpperCamelCase : Optional[int] = None , ): _lowercase: int = eos_token_id _lowercase: Dict = None _lowercase: Any = None _lowercase: Optional[Any] = torch.ones(_UpperCamelCase , device=_UpperCamelCase , dtype=torch.int) _lowercase: List[Any] = torch.zeros(_UpperCamelCase , device=_UpperCamelCase , dtype=torch.bool) if input_embeds is not None: _lowercase: Dict = input_embeds else: _lowercase: Dict = self.transformer.transformer.wte(_UpperCamelCase) for i in range(_UpperCamelCase): _lowercase: Optional[Any] = self.transformer(inputs_embeds=_UpperCamelCase) _lowercase: str = outputs.logits _lowercase: int = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) _lowercase: Optional[Any] = logits.softmax(-1).log() if scores is None: _lowercase: Tuple = logits.topk(_UpperCamelCase , -1) _lowercase: Dict = generated.expand(_UpperCamelCase , *generated.shape[1:]) _lowercase: Optional[int] = next_tokens.permute(1 , 0), scores.squeeze(0) if tokens is None: _lowercase: int = next_tokens else: _lowercase: Any = tokens.expand(_UpperCamelCase , *tokens.shape[1:]) _lowercase: List[str] = torch.cat((tokens, next_tokens) , dim=1) else: _lowercase: List[str] = -float(np.inf) _lowercase: str = 0 _lowercase: Any = scores[:, None] + logits seq_lengths[~is_stopped] += 1 _lowercase: Tuple = scores_sum / seq_lengths[:, None] _lowercase: Dict = scores_sum_average.view(-1).topk(_UpperCamelCase , -1) _lowercase: Dict = next_tokens // scores_sum.shape[1] _lowercase: Tuple = seq_lengths[next_tokens_source] _lowercase: str = next_tokens % scores_sum.shape[1] _lowercase: Union[str, Any] = next_tokens.unsqueeze(1) _lowercase: int = tokens[next_tokens_source] _lowercase: Optional[int] = torch.cat((tokens, next_tokens) , dim=1) _lowercase: Union[str, Any] = generated[next_tokens_source] _lowercase: Optional[int] = scores_sum_average * seq_lengths _lowercase: Dict = is_stopped[next_tokens_source] _lowercase: str = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1) _lowercase: Optional[int] = torch.cat((generated, next_token_embed) , dim=1) _lowercase: List[str] = is_stopped + next_tokens.eq(_UpperCamelCase).squeeze() if is_stopped.all(): break _lowercase: Any = scores / seq_lengths _lowercase: Union[str, Any] = scores.argsort(descending=_UpperCamelCase) # tokens tensors are already padded to max_seq_length _lowercase: List[Any] = [tokens[i] for i in order] _lowercase: List[Any] = torch.stack(_UpperCamelCase , dim=0) _lowercase: List[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype) return output_texts, seq_lengths
706
def binary_xor(a: int, b: int) -> str:
    """Return the binary XOR of two positive integers as a "0b"-prefixed string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
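# NOTE: quick worked check of binary_xor above -- my own example, not from the source.
print(binary_xor(25, 32))  # 0b111001, i.e. 57
assert int(binary_xor(25, 32), 2) == 25 ^ 32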
206
0
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = 1, __snake_case = 1, __snake_case = 1.0e4, __snake_case = False, __snake_case = 1.0, ) -> jnp.ndarray: """simple docstring""" assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even''' _UpperCamelCase = float(embedding_dim // 2 ) _UpperCamelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) _UpperCamelCase = min_timescale * jnp.exp(jnp.arange(__snake_case, dtype=jnp.floataa ) * -log_timescale_increment ) _UpperCamelCase = jnp.expand_dims(__snake_case, 1 ) * jnp.expand_dims(__snake_case, 0 ) # scale embeddings _UpperCamelCase = scale * emb if flip_sin_to_cos: _UpperCamelCase = jnp.concatenate([jnp.cos(__snake_case ), jnp.sin(__snake_case )], axis=1 ) else: _UpperCamelCase = jnp.concatenate([jnp.sin(__snake_case ), jnp.cos(__snake_case )], axis=1 ) _UpperCamelCase = jnp.reshape(__snake_case, [jnp.shape(__snake_case )[0], embedding_dim] ) return signal class _UpperCAmelCase( nn.Module ): lowercase__ = 32 lowercase__ = jnp.floataa @nn.compact def __call__( self , __a) -> Any: '''simple docstring''' _UpperCamelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''')(__a) _UpperCamelCase = nn.silu(__a) _UpperCamelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''')(__a) return temb class _UpperCAmelCase( nn.Module ): lowercase__ = 32 lowercase__ = False lowercase__ = 1 @nn.compact def __call__( self , __a) -> Optional[Any]: '''simple docstring''' return get_sinusoidal_embeddings( __a , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
19
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil _a = 100 _a = set(range(3, NUM_PRIMES, 2)) primes.add(2) _a = 42 for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=1_00 ) def lowerCamelCase__ ( __snake_case ) -> set[int]: """simple docstring""" if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} _UpperCamelCase = set() _UpperCamelCase = 42 _UpperCamelCase = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def lowerCamelCase__ ( __snake_case = 50_00 ) -> int | None: """simple docstring""" for number_to_partition in range(1, __snake_case ): if len(partition(__snake_case ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"""{solution() = }""")
19
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
'''simple docstring''' import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class SCREAMING_SNAKE_CASE( tf.keras.optimizers.schedules.LearningRateSchedule ): def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1.0 , lowerCamelCase__ = None , ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = initial_learning_rate __lowercase = warmup_steps __lowercase = power __lowercase = decay_schedule_fn __lowercase = name def __call__( self , lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" with tf.name_scope(self.name or """WarmUp""" ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. __lowercase = tf.cast(lowerCamelCase__ , tf.floataa ) __lowercase = tf.cast(self.warmup_steps , tf.floataa ) __lowercase = global_step_float / warmup_steps_float __lowercase = self.initial_learning_rate * tf.math.pow(lowerCamelCase__ , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowerCamelCase__ , ) def snake_case__ ( self ) -> Optional[Any]: """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def snake_case_ ( a__ : float ,a__ : int ,a__ : int ,a__ : float = 0.0 ,a__ : float = 0.9 ,a__ : float = 0.9_9_9 ,a__ : float = 1e-8 ,a__ : Optional[float] = None ,a__ : Optional[float] = None ,a__ : float = 0.0 ,a__ : float = 1.0 ,a__ : Optional[List[str]] = None ,): """simple docstring""" __lowercase = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=a__ ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=a__ ,) if num_warmup_steps: __lowercase = WarmUp( initial_learning_rate=a__ ,decay_schedule_fn=a__ ,warmup_steps=a__ ,) if weight_decay_rate > 0.0: __lowercase = AdamWeightDecay( learning_rate=a__ ,weight_decay_rate=a__ ,beta_a=a__ ,beta_a=a__ ,epsilon=a__ ,clipnorm=a__ ,global_clipnorm=a__ ,exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] ,include_in_weight_decay=a__ ,) else: __lowercase = tf.keras.optimizers.Adam( learning_rate=a__ ,beta_a=a__ ,beta_a=a__ ,epsilon=a__ ,clipnorm=a__ ,global_clipnorm=a__ ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class SCREAMING_SNAKE_CASE( __A ): def __init__( self , lowerCamelCase__ = 0.0_01 , lowerCamelCase__ = 0.9 , lowerCamelCase__ = 0.9_99 , lowerCamelCase__ = 1E-7 , lowerCamelCase__ = False , lowerCamelCase__ = 0.0 , lowerCamelCase__ = None , lowerCamelCase__ = None , lowerCamelCase__ = "AdamWeightDecay" , **lowerCamelCase__ , ) -> Any: """simple docstring""" super().__init__(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) __lowercase = weight_decay_rate __lowercase = include_in_weight_decay __lowercase = exclude_from_weight_decay @classmethod def snake_case__ ( cls , lowerCamelCase__ ) -> Optional[int]: """simple docstring""" __lowercase = {"""WarmUp""": WarmUp} return super(lowerCamelCase__ , cls ).from_config(lowerCamelCase__ , custom_objects=lowerCamelCase__ ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]: """simple docstring""" super(lowerCamelCase__ , self )._prepare_local(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowercase = tf.constant( self.weight_decay_rate , name="""adam_weight_decay_rate""" ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]: """simple docstring""" __lowercase = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , ) return tf.no_op() def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__=None , **lowerCamelCase__ ) -> Any: """simple docstring""" __lowercase ,__lowercase = list(zip(*lowerCamelCase__ ) ) return super(lowerCamelCase__ , self ).apply_gradients(zip(lowerCamelCase__ , lowerCamelCase__ ) , name=lowerCamelCase__ , **lowerCamelCase__ ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any: """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} __lowercase = apply_state or {} __lowercase = apply_state.get((var_device, var_dtype) ) if coefficients is None: __lowercase = self._fallback_apply_state(lowerCamelCase__ , lowerCamelCase__ ) __lowercase = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Any: """simple docstring""" __lowercase ,__lowercase = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ ) __lowercase = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase__ , self )._resource_apply_dense(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> Dict: """simple docstring""" __lowercase ,__lowercase = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase__ ) __lowercase = self._decay_weights_op(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) with tf.control_dependencies([decay] ): return super(lowerCamelCase__ , self )._resource_apply_sparse(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) def snake_case__ ( self ) -> Tuple: """simple docstring""" __lowercase = super().get_config() config.update({"""weight_decay_rate""": self.weight_decay_rate} ) return config def snake_case__ ( self , lowerCamelCase__ ) -> Tuple: 
"""simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowerCamelCase__ , lowerCamelCase__ ) is not None: return False return True class SCREAMING_SNAKE_CASE( __A ): def __init__( self ) -> Optional[int]: """simple docstring""" __lowercase = [] __lowercase = None @property def snake_case__ ( self ) -> Union[str, Any]: """simple docstring""" if self._accum_steps is None: __lowercase = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowerCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def snake_case__ ( self ) -> Union[str, Any]: """simple docstring""" if not self._gradients: raise ValueError("""The accumulator should be called first to initialize the gradients""" ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , lowerCamelCase__ ) -> Dict: """simple docstring""" if not self._gradients: __lowercase = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowerCamelCase__ ) , trainable=lowerCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowerCamelCase__ ) != len(self._gradients ): raise ValueError(F'Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase__ )}' ) for accum_gradient, gradient in zip(self._gradients , lowerCamelCase__ ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowerCamelCase__ ) self._accum_steps.assign_add(1 ) def snake_case__ ( self ) -> str: """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowerCamelCase__ ) )
163
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu A : List[str] = False class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def snake_case ( self ): return 12 @property def snake_case ( self ): return 12 @property def snake_case ( self ): return 32 @property def snake_case ( self ): torch.manual_seed(0 ) __lowerCAmelCase = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def snake_case ( self ): __lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def snake_case ( self ): torch.manual_seed(0 ) __lowerCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(UpperCamelCase__ ) @property def snake_case ( self ): torch.manual_seed(0 ) __lowerCAmelCase = 12 __lowerCAmelCase = 12 __lowerCAmelCase = { "attention_bias": True, "cross_attention_dim": 32, "attention_head_dim": height * width, "num_attention_heads": 1, "num_vector_embeds": self.num_embed, "num_embeds_ada_norm": self.num_embeds_ada_norm, "norm_num_groups": 32, "sample_size": width, "activation_fn": "geglu-approximate", } __lowerCAmelCase = TransformeraDModel(**UpperCamelCase__ ) return model def snake_case ( self ): __lowerCAmelCase = "cpu" __lowerCAmelCase = self.dummy_vqvae __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_transformer __lowerCAmelCase = VQDiffusionScheduler(self.num_embed ) __lowerCAmelCase = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase__ ) __lowerCAmelCase = VQDiffusionPipeline( vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , ) __lowerCAmelCase = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) __lowerCAmelCase = "teddy bear playing in the pool" __lowerCAmelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) __lowerCAmelCase = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" ) __lowerCAmelCase = output.images __lowerCAmelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) __lowerCAmelCase = pipe( [prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCAmelCase = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 
0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def snake_case ( self ): __lowerCAmelCase = "cpu" __lowerCAmelCase = self.dummy_vqvae __lowerCAmelCase = self.dummy_text_encoder __lowerCAmelCase = self.dummy_tokenizer __lowerCAmelCase = self.dummy_transformer __lowerCAmelCase = VQDiffusionScheduler(self.num_embed ) __lowerCAmelCase = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCamelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __lowerCAmelCase = VQDiffusionPipeline( vqvae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , transformer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , ) __lowerCAmelCase = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) __lowerCAmelCase = "teddy bear playing in the pool" __lowerCAmelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) __lowerCAmelCase = pipe([prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" ) __lowerCAmelCase = output.images __lowerCAmelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) __lowerCAmelCase = pipe( [prompt] , generator=UpperCamelCase__ , output_type="np" , return_dict=UpperCamelCase__ , num_inference_steps=2 )[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __lowerCAmelCase = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def snake_case ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self ): __lowerCAmelCase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) __lowerCAmelCase = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) __lowerCAmelCase = pipeline.to(UpperCamelCase__ ) pipeline.set_progress_bar_config(disable=UpperCamelCase__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __lowerCAmelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) __lowerCAmelCase = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=UpperCamelCase__ , output_type="np" , ) __lowerCAmelCase = output.images[0] assert image.shape == (2_56, 2_56, 3) assert np.abs(expected_image - image ).max() < 2.0
636
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a plain vocab.json mapping."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # split the input string into individual characters
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
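# NOTE: hypothetical usage sketch -- the toy vocab below is mine; a real
# checkpoint ships its own vocab.json.
import json

with open("vocab.json", "w", encoding="utf-8") as f:
    json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3}, f)

tok = MgpstrTokenizer("vocab.json")
print(tok._tokenize("ab"))            # ['a', 'b']
print(tok._convert_token_to_id("a"))  # 2
print(tok._convert_token_to_id("z"))  # 0 -- unknown chars fall back to "[GO]"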
404
0
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class _A ( nn.Module ): """simple docstring""" def __init__( self : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : str = "geglu" , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = "layer_norm" , __UpperCAmelCase : bool = False , ): super().__init__() a : int = only_cross_attention a : List[str] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" a : Optional[Any] = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''') # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: a : Any = AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase) elif self.use_ada_layer_norm_zero: a : Optional[int] = AdaLayerNormZero(__UpperCAmelCase , __UpperCAmelCase) else: a : Optional[int] = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase) a : int = Attention( query_dim=__UpperCAmelCase , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=__UpperCAmelCase , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. a : Dict = ( AdaLayerNorm(__UpperCAmelCase , __UpperCAmelCase) if self.use_ada_layer_norm else nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase) ) a : Union[str, Any] = Attention( query_dim=__UpperCAmelCase , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=__UpperCAmelCase , dim_head=__UpperCAmelCase , dropout=__UpperCAmelCase , bias=__UpperCAmelCase , upcast_attention=__UpperCAmelCase , ) # is self-attn if encoder_hidden_states is none else: a : int = None a : List[Any] = None # 3. 
Feed-forward a : Tuple = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase) a : Optional[Any] = FeedForward(__UpperCAmelCase , dropout=__UpperCAmelCase , activation_fn=__UpperCAmelCase , final_dropout=__UpperCAmelCase) # let chunk size default to None a : str = None a : List[Any] = 0 def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int): # Sets chunk feed-forward a : Dict = chunk_size a : Union[str, Any] = dim def __snake_case ( self : int , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[torch.LongTensor] = None , __UpperCAmelCase : Dict[str, Any] = None , __UpperCAmelCase : Optional[torch.LongTensor] = None , ): # Notice that normalization is always applied before the real computation in the following blocks. # 1. Self-Attention if self.use_ada_layer_norm: a : str = self.norma(__UpperCAmelCase , __UpperCAmelCase) elif self.use_ada_layer_norm_zero: a : List[Any] = self.norma( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hidden_dtype=hidden_states.dtype) else: a : str = self.norma(__UpperCAmelCase) a : int = cross_attention_kwargs if cross_attention_kwargs is not None else {} a : Optional[Any] = self.attna( __UpperCAmelCase , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) if self.use_ada_layer_norm_zero: a : str = gate_msa.unsqueeze(1) * attn_output a : Tuple = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: a : Dict = ( self.norma(__UpperCAmelCase , __UpperCAmelCase) if self.use_ada_layer_norm else self.norma(__UpperCAmelCase) ) a : int = self.attna( __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , attention_mask=__UpperCAmelCase , **__UpperCAmelCase , ) a : List[str] = attn_output + hidden_states # 3. Feed-forward a : Optional[int] = self.norma(__UpperCAmelCase) if self.use_ada_layer_norm_zero: a : Any = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''') a : Optional[int] = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size a : Optional[Any] = torch.cat( [self.ff(__UpperCAmelCase) for hid_slice in norm_hidden_states.chunk(__UpperCAmelCase , dim=self._chunk_dim)] , dim=self._chunk_dim , ) else: a : str = self.ff(__UpperCAmelCase) if self.use_ada_layer_norm_zero: a : Optional[Any] = gate_mlp.unsqueeze(1) * ff_output a : Optional[Any] = ff_output + hidden_states return hidden_states class _A ( nn.Module ): """simple docstring""" def __init__( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : int = 4 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : str = "geglu" , __UpperCAmelCase : bool = False , ): super().__init__() a : Optional[int] = int(dim * mult) a : Optional[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": a : Union[str, Any] = GELU(__UpperCAmelCase , __UpperCAmelCase) if activation_fn == "gelu-approximate": a : int = GELU(__UpperCAmelCase , __UpperCAmelCase , approximate="tanh") elif activation_fn == "geglu": a : List[str] = GEGLU(__UpperCAmelCase , __UpperCAmelCase) elif activation_fn == "geglu-approximate": a : int = ApproximateGELU(__UpperCAmelCase , __UpperCAmelCase) a : List[str] = nn.ModuleList([]) # project in self.net.append(__UpperCAmelCase) # project dropout self.net.append(nn.Dropout(__UpperCAmelCase)) # project out self.net.append(nn.Linear(__UpperCAmelCase , __UpperCAmelCase)) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(__UpperCAmelCase)) def __snake_case ( self : int , __UpperCAmelCase : List[Any]): for module in self.net: a : Any = module(__UpperCAmelCase) return hidden_states class _A ( nn.Module ): """simple docstring""" def __init__( self : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : str = "none"): super().__init__() a : Any = nn.Linear(__UpperCAmelCase , __UpperCAmelCase) a : Tuple = approximate def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any]): if gate.device.type != "mps": return F.gelu(__UpperCAmelCase , approximate=self.approximate) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa) , approximate=self.approximate).to(dtype=gate.dtype) def __snake_case ( self : Tuple , __UpperCAmelCase : Union[str, Any]): a : str = self.proj(__UpperCAmelCase) a : Dict = self.gelu(__UpperCAmelCase) return hidden_states class _A ( nn.Module ): """simple docstring""" def __init__( self : Any , __UpperCAmelCase : int , __UpperCAmelCase : int): super().__init__() a : Union[str, Any] = nn.Linear(__UpperCAmelCase , dim_out * 2) def __snake_case ( self : List[str] , __UpperCAmelCase : List[str]): if gate.device.type != "mps": return F.gelu(__UpperCAmelCase) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype) def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Tuple = self.proj(__UpperCAmelCase).chunk(2 , dim=-1) return hidden_states * self.gelu(__UpperCAmelCase) class _A ( nn.Module ): """simple docstring""" def __init__( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int): super().__init__() a : Optional[int] = nn.Linear(__UpperCAmelCase , __UpperCAmelCase) def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[Any]): a : List[Any] = self.proj(__UpperCAmelCase) return x * torch.sigmoid(1.702 * x) 
class _A ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any]): super().__init__() a : List[Any] = nn.Embedding(__UpperCAmelCase , __UpperCAmelCase) a : Dict = nn.SiLU() a : Tuple = nn.Linear(__UpperCAmelCase , embedding_dim * 2) a : Optional[int] = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase) def __snake_case ( self : str , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str): a : List[Any] = self.linear(self.silu(self.emb(__UpperCAmelCase))) a : Any = torch.chunk(__UpperCAmelCase , 2) a : int = self.norm(__UpperCAmelCase) * (1 + scale) + shift return x class _A ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple): super().__init__() a : Dict = CombinedTimestepLabelEmbeddings(__UpperCAmelCase , __UpperCAmelCase) a : Optional[int] = nn.SiLU() a : Tuple = nn.Linear(__UpperCAmelCase , 6 * embedding_dim , bias=__UpperCAmelCase) a : Dict = nn.LayerNorm(__UpperCAmelCase , elementwise_affine=__UpperCAmelCase , eps=1e-6) def __snake_case ( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str=None): a : Union[str, Any] = self.linear(self.silu(self.emb(__UpperCAmelCase , __UpperCAmelCase , hidden_dtype=__UpperCAmelCase))) a : Any = emb.chunk(6 , dim=1) a : str = self.norm(__UpperCAmelCase) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class _A ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : float = 1e-5): super().__init__() a : Union[str, Any] = num_groups a : Optional[int] = eps if act_fn is None: a : Union[str, Any] = None else: a : Tuple = get_activation(__UpperCAmelCase) a : str = nn.Linear(__UpperCAmelCase , out_dim * 2) def __snake_case ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]): if self.act: a : Dict = self.act(__UpperCAmelCase) a : Tuple = self.linear(__UpperCAmelCase) a : int = emb[:, :, None, None] a : List[str] = emb.chunk(2 , dim=1) a : Optional[Any] = F.group_norm(__UpperCAmelCase , self.num_groups , eps=self.eps) a : Union[str, Any] = x * (1 + scale) + shift return x
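# NOTE: standalone illustration (mine, not diffusers API) of the feed-forward
# chunking trick used in the transformer block above: splitting the sequence
# dimension before the MLP lowers peak activation memory without changing the
# result.
import torch


def chunked_ff(ff, hidden_states, chunk_size, dim=1):
    # Equivalent to ff(hidden_states), but materializes activations chunk by chunk.
    if hidden_states.shape[dim] % chunk_size != 0:
        raise ValueError("sequence length must be divisible by chunk_size")
    num_chunks = hidden_states.shape[dim] // chunk_size
    return torch.cat([ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=dim)], dim=dim)


ff = torch.nn.Sequential(torch.nn.Linear(8, 32), torch.nn.GELU(), torch.nn.Linear(32, 8))
x = torch.randn(2, 16, 8)
assert torch.allclose(chunked_ff(ff, x, chunk_size=4), ff(x), atol=1e-6)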
720
"""simple docstring""" from __future__ import annotations from math import pow, sqrt def lowercase ( A_ , A_ , A_ )-> dict[str, float]: '''simple docstring''' if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance == 0: return {"resistance": sqrt(pow(A_ , 2 ) - pow(A_ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(A_ , 2 ) - pow(A_ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(A_ , 2 ) + pow(A_ , 2 ) )} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
135
0
import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class a ( __UpperCAmelCase ): def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __lowerCAmelCase = tempfile.mkdtemp() __lowerCAmelCase = 8 # DPR tok __lowerCAmelCase = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] __lowerCAmelCase = os.path.join(self.tmpdirname , "dpr_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) __lowerCAmelCase = os.path.join(snake_case__ , DPR_VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) # BART tok __lowerCAmelCase = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] __lowerCAmelCase = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) __lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] __lowerCAmelCase = {"unk_token": "<unk>"} __lowerCAmelCase = os.path.join(self.tmpdirname , "bart_tokenizer" ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) __lowerCAmelCase = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["vocab_file"] ) __lowerCAmelCase = os.path.join(snake_case__ , BART_VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" shutil.rmtree(self.tmpdirname ) @require_tokenizers def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __lowerCAmelCase = os.path.join(self.tmpdirname , "rag_tokenizer" ) __lowerCAmelCase = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) __lowerCAmelCase = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(snake_case__ ) rag_tokenizer.save_pretrained(snake_case__ ) __lowerCAmelCase = RagTokenizer.from_pretrained(snake_case__ , config=snake_case__ ) self.assertIsInstance(new_rag_tokenizer.question_encoder , snake_case__ ) 
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , snake_case__ ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __lowerCAmelCase = RagTokenizer.from_pretrained("facebook/rag-token-nq" ) __lowerCAmelCase = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] __lowerCAmelCase = tokenizer(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __lowerCAmelCase = RagTokenizer.from_pretrained("facebook/rag-sequence-nq" ) __lowerCAmelCase = [ "who got the first nobel prize in physics", "when is the next deadpool movie being released", "which mode is used for short wave broadcast service", "who is the owner of reading football club", "when is the next scandal episode coming out", "when is the last time the philadelphia won the superbowl", "what is the most current adobe flash player version", "how many episodes are there in dragon ball z", "what is the first step in the evolution of the eye", "where is gall bladder situated in human body", "what is the main mineral in lithium batteries", "who is the president of usa right now", "where do the greasers live in the outsiders", "panda is a national animal of which country", "what is the name of manchester united stadium", ] __lowerCAmelCase = tokenizer(snake_case__ ) self.assertIsNotNone(snake_case__ )
611
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase_ = logging.get_logger(__name__) UpperCamelCase_ = { "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class a ( __UpperCAmelCase ): lowercase_ : Any = 'wavlm' def __init__( self : List[Any] , snake_case__ : int=32 , snake_case__ : Optional[int]=768 , snake_case__ : int=12 , snake_case__ : Optional[Any]=12 , snake_case__ : Union[str, Any]=3_072 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : Dict=0.0 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Any=0.1 , snake_case__ : str=0.0_2 , snake_case__ : Dict=1E-5 , snake_case__ : Union[str, Any]="group" , snake_case__ : List[Any]="gelu" , snake_case__ : Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , snake_case__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , snake_case__ : str=(10, 3, 3, 3, 3, 2, 2) , snake_case__ : str=False , snake_case__ : Dict=128 , snake_case__ : List[str]=16 , snake_case__ : Union[str, Any]=320 , snake_case__ : int=800 , snake_case__ : Optional[int]=False , snake_case__ : int=True , snake_case__ : Tuple=0.0_5 , snake_case__ : Any=10 , snake_case__ : Union[str, Any]=2 , snake_case__ : List[Any]=0.0 , snake_case__ : Dict=10 , snake_case__ : Any=320 , snake_case__ : str=2 , snake_case__ : Any=0.1 , snake_case__ : int=100 , snake_case__ : str=256 , snake_case__ : Dict=256 , snake_case__ : List[Any]=0.1 , snake_case__ : Optional[Any]="mean" , snake_case__ : Tuple=False , snake_case__ : Dict=False , snake_case__ : Dict=256 , snake_case__ : Tuple=(512, 512, 512, 512, 1_500) , snake_case__ : Tuple=(5, 3, 3, 1, 1) , snake_case__ : str=(1, 2, 3, 1, 1) , snake_case__ : Any=512 , snake_case__ : List[Any]=80 , snake_case__ : Any=0 , snake_case__ : Tuple=1 , snake_case__ : List[str]=2 , snake_case__ : int=False , snake_case__ : List[str]=3 , snake_case__ : List[str]=2 , snake_case__ : Dict=3 , snake_case__ : Tuple=None , **snake_case__ : Tuple , ): """simple docstring""" super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ ) __lowerCAmelCase = hidden_size __lowerCAmelCase = feat_extract_norm __lowerCAmelCase = feat_extract_activation __lowerCAmelCase = list(snake_case__ ) __lowerCAmelCase = list(snake_case__ ) __lowerCAmelCase = list(snake_case__ ) __lowerCAmelCase = conv_bias __lowerCAmelCase = num_buckets __lowerCAmelCase = max_bucket_distance __lowerCAmelCase = num_conv_pos_embeddings __lowerCAmelCase = num_conv_pos_embedding_groups __lowerCAmelCase = len(self.conv_dim ) __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = num_attention_heads __lowerCAmelCase = hidden_dropout __lowerCAmelCase = attention_dropout __lowerCAmelCase = activation_dropout __lowerCAmelCase = feat_proj_dropout __lowerCAmelCase = final_dropout __lowerCAmelCase = layerdrop __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = initializer_range __lowerCAmelCase = num_ctc_classes __lowerCAmelCase = vocab_size __lowerCAmelCase = do_stable_layer_norm __lowerCAmelCase = use_weighted_layer_sum __lowerCAmelCase = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != 
self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==" " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =" F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`," F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCAmelCase = apply_spec_augment __lowerCAmelCase = mask_time_prob __lowerCAmelCase = mask_time_length __lowerCAmelCase = mask_time_min_masks __lowerCAmelCase = mask_feature_prob __lowerCAmelCase = mask_feature_length # parameters for pretraining with codevector quantized representations __lowerCAmelCase = num_codevectors_per_group __lowerCAmelCase = num_codevector_groups __lowerCAmelCase = contrastive_logits_temperature __lowerCAmelCase = num_negatives __lowerCAmelCase = codevector_dim __lowerCAmelCase = proj_codevector_dim __lowerCAmelCase = diversity_loss_weight # ctc loss __lowerCAmelCase = ctc_loss_reduction __lowerCAmelCase = ctc_zero_infinity # adapter __lowerCAmelCase = add_adapter __lowerCAmelCase = adapter_kernel_size __lowerCAmelCase = adapter_stride __lowerCAmelCase = num_adapter_layers __lowerCAmelCase = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __lowerCAmelCase = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __lowerCAmelCase = list(snake_case__ ) __lowerCAmelCase = list(snake_case__ ) __lowerCAmelCase = list(snake_case__ ) __lowerCAmelCase = xvector_output_dim @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
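# NOTE: orientation check using the upstream class name (WavLMConfig); the
# unnamed property at the end of the class above is upstream's
# inputs_to_logits_ratio, the product of the conv strides.
from transformers import WavLMConfig

config = WavLMConfig()  # defaults as in the signature above
print(config.inputs_to_logits_ratio)  # 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320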
611
1
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
709
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}


class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
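A short, hedged usage sketch for the tokenizer above; it assumes network access to the public `google/reformer-crime-and-punishment` checkpoint referenced in the vocab map:

# Illustrative only; requires access to the Hugging Face Hub.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok("Crime and Punishment").input_ids   # encode text to token ids
print(tok.decode(ids))                        # round-trip back to a string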
421
0
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to appear in the template's asdict output
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
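A minimal sketch of how the template above maps a dataset's own column names onto the canonical schema; "query", "passage", and "labels" are hypothetical column names chosen for illustration:

# Illustrative only; the column names are made up.
template = QuestionAnsweringExtractive(
    question_column="query", context_column="passage", answers_column="labels"
)
assert template.column_mapping == {"query": "question", "passage": "context", "labels": "answers"}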
229
'''simple docstring''' from typing import List, Optional, TypeVar from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .info import DatasetInfo from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets from .splits import NamedSplit from .utils import logging from .utils.py_utils import Literal __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = TypeVar("""DatasetType""", Dataset, IterableDataset) def UpperCAmelCase_ (__a : List[DatasetType] , __a : Optional[List[float]] = None , __a : Optional[int] = None , __a : Optional[DatasetInfo] = None , __a : Optional[NamedSplit] = None , __a : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ): """simple docstring""" from .arrow_dataset import Dataset from .iterable_dataset import IterableDataset if not datasets: raise ValueError('Unable to interleave an empty list of datasets.' ) for i, dataset in enumerate(__a ): if not isinstance(__a , (Dataset, IterableDataset) ): if isinstance(__a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' ) raise ValueError( f"""Dataset at position {i} has at least one split: {list(__a )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}.""" ) if i == 0: _a, _a : Tuple = ( (Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset) ) elif not isinstance(__a , __a ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if stopping_strategy not in ["first_exhausted", "all_exhausted"]: raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" ) if dataset_type is Dataset: return _interleave_map_style_datasets( __a , __a , __a , info=__a , split=__a , stopping_strategy=__a ) else: return _interleave_iterable_datasets( __a , __a , __a , info=__a , split=__a , stopping_strategy=__a ) def UpperCAmelCase_ (__a : List[DatasetType] , __a : Optional[DatasetInfo] = None , __a : Optional[NamedSplit] = None , __a : int = 0 , ): """simple docstring""" if not dsets: raise ValueError('Unable to concatenate an empty list of datasets.' ) for i, dataset in enumerate(__a ): if not isinstance(__a , (Dataset, IterableDataset) ): if isinstance(__a , (DatasetDict, IterableDatasetDict) ): if not dataset: raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """ 'is an empty dataset dictionary.' 
) raise ValueError( f"""Dataset at position {i} has at least one split: {list(__a )}\n""" f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(__a ) )}']""" ) raise ValueError( f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(__a ).__name__}.""" ) if i == 0: _a, _a : Dict = ( (Dataset, IterableDataset) if isinstance(__a , __a ) else (IterableDataset, Dataset) ) elif not isinstance(__a , __a ): raise ValueError( f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" ) if dataset_type is Dataset: return _concatenate_map_style_datasets(__a , info=__a , split=__a , axis=__a ) else: return _concatenate_iterable_datasets(__a , info=__a , split=__a , axis=__a )
229
1
import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowercase__ ( _UpperCamelCase ): '''simple docstring''' def UpperCamelCase__ ( self ) -> Dict: """simple docstring""" UpperCamelCase__ : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__magic_name__, '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__magic_name__, '''num_attention_heads''' ) ) class lowercase__ : '''simple docstring''' def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=64, __magic_name__=3, __magic_name__=3, __magic_name__=2, __magic_name__=1, __magic_name__=16, __magic_name__=[128, 256, 384], __magic_name__=[4, 6, 8], __magic_name__=[2, 3, 4], __magic_name__=[16, 16, 16], __magic_name__=0, __magic_name__=[2, 2, 2], __magic_name__=[2, 2, 2], __magic_name__=0.02, __magic_name__=True, __magic_name__=True, __magic_name__=2, ) -> List[Any]: """simple docstring""" UpperCamelCase__ : Optional[Any] = parent UpperCamelCase__ : Dict = batch_size UpperCamelCase__ : str = image_size UpperCamelCase__ : Tuple = num_channels UpperCamelCase__ : Optional[Any] = kernel_size UpperCamelCase__ : List[Any] = stride UpperCamelCase__ : Tuple = padding UpperCamelCase__ : int = hidden_sizes UpperCamelCase__ : Dict = num_attention_heads UpperCamelCase__ : Union[str, Any] = depths UpperCamelCase__ : Dict = key_dim UpperCamelCase__ : int = drop_path_rate UpperCamelCase__ : Any = patch_size UpperCamelCase__ : Optional[Any] = attention_ratio UpperCamelCase__ : List[str] = mlp_ratio UpperCamelCase__ : Union[str, Any] = initializer_range UpperCamelCase__ : Dict = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] UpperCamelCase__ : int = is_training UpperCamelCase__ : Any = use_labels UpperCamelCase__ : Tuple = num_labels UpperCamelCase__ : Optional[Any] = initializer_range def UpperCamelCase__ ( self ) -> List[Any]: """simple docstring""" UpperCamelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase__ : Optional[Any] = None if self.use_labels: UpperCamelCase__ : int = ids_tensor([self.batch_size], self.num_labels ) UpperCamelCase__ : Tuple = self.get_config() return config, pixel_values, labels def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" return LevitConfig( image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, 
drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, ) def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]: """simple docstring""" UpperCamelCase__ : Tuple = LevitModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCamelCase__ : str = model(__magic_name__ ) UpperCamelCase__ : Optional[int] = (self.image_size, self.image_size) UpperCamelCase__ : str = image_size[0], image_size[1] for _ in range(4 ): UpperCamelCase__ : Optional[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) UpperCamelCase__ : Optional[int] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), ) def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> int: """simple docstring""" UpperCamelCase__ : Dict = self.num_labels UpperCamelCase__ : Tuple = LevitForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCamelCase__ : int = model(__magic_name__, labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def UpperCamelCase__ ( self ) -> List[str]: """simple docstring""" UpperCamelCase__ : int = self.prepare_config_and_inputs() UpperCamelCase__ : Tuple = config_and_inputs UpperCamelCase__ : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowercase__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' a : int = ( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) a : Dict = ( { "feature-extraction": LevitModel, "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) a : List[Any] = False a : Optional[Any] = False a : Any = False a : Union[str, Any] = False a : Any = False def UpperCamelCase__ ( self ) -> str: """simple docstring""" UpperCamelCase__ : Union[str, Any] = LevitModelTester(self ) UpperCamelCase__ : Union[str, Any] = ConfigTester(self, config_class=__magic_name__, has_text_modality=__magic_name__, hidden_size=37 ) def UpperCamelCase__ ( self ) -> Any: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ ( self ) -> Any: """simple docstring""" return @unittest.skip(reason='''Levit does not use inputs_embeds''' ) def UpperCamelCase__ ( self ) -> Any: """simple docstring""" pass @unittest.skip(reason='''Levit does not support input and output embeddings''' ) def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='''Levit does not output attentions''' ) def UpperCamelCase__ ( self ) -> List[Any]: """simple docstring""" pass def UpperCamelCase__ ( self ) -> str: """simple docstring""" UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: UpperCamelCase__ : Dict = model_class(__magic_name__ ) UpperCamelCase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase__ : Tuple = [*signature.parameters.keys()] UpperCamelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1], __magic_name__ ) def UpperCamelCase__ ( self ) -> Any: """simple docstring""" def check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ): UpperCamelCase__ : List[Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): UpperCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(__magic_name__, __magic_name__ ) ) UpperCamelCase__ : Dict = outputs.hidden_states UpperCamelCase__ : str = len(self.model_tester.depths ) + 1 self.assertEqual(len(__magic_name__ ), __magic_name__ ) UpperCamelCase__ : Optional[int] = (self.model_tester.image_size, self.model_tester.image_size) UpperCamelCase__ : Any = image_size[0], image_size[1] for _ in range(4 ): UpperCamelCase__ : int = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) UpperCamelCase__ : List[Any] = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ), [ height * width, self.model_tester.hidden_sizes[0], ], ) UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__ : Optional[int] = True check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase__ : int = True check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCamelCase__ ( self ) -> List[str]: """simple docstring""" pass def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=False ) -> Union[str, Any]: """simple docstring""" UpperCamelCase__ : Tuple = super()._prepare_for_class(__magic_name__, __magic_name__, return_labels=__magic_name__ ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def UpperCamelCase__ ( self ) -> int: """simple docstring""" UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCamelCase__ ( self ) -> str: """simple docstring""" UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) def UpperCamelCase__ ( self ) -> Optional[Any]: """simple docstring""" if not self.model_tester.is_training: return UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ : Any = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(__magic_name__ ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue UpperCamelCase__ : Union[str, Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.train() UpperCamelCase__ : int = 
self._prepare_for_class(__magic_name__, __magic_name__, return_labels=__magic_name__ ) UpperCamelCase__ : Tuple = model(**__magic_name__ ).loss loss.backward() def UpperCamelCase__ ( self ) -> int: """simple docstring""" UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCamelCase__ : Dict = False UpperCamelCase__ : int = True for model_class in self.all_model_classes: if model_class in get_values(__magic_name__ ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue UpperCamelCase__ : Union[str, Any] = model_class(__magic_name__ ) model.gradient_checkpointing_enable() model.to(__magic_name__ ) model.train() UpperCamelCase__ : Dict = self._prepare_for_class(__magic_name__, __magic_name__, return_labels=__magic_name__ ) UpperCamelCase__ : List[str] = model(**__magic_name__ ).loss loss.backward() def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase__ : Union[str, Any] = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(__magic_name__ ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}" ): UpperCamelCase__ : int = problem_type["title"] UpperCamelCase__ : Optional[int] = problem_type["num_labels"] UpperCamelCase__ : List[str] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.train() UpperCamelCase__ : List[Any] = self._prepare_for_class(__magic_name__, __magic_name__, return_labels=__magic_name__ ) if problem_type["num_labels"] > 1: UpperCamelCase__ : Dict = inputs["labels"].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] ) UpperCamelCase__ : str = inputs["labels"].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=__magic_name__ ) as warning_list: UpperCamelCase__ : List[str] = model(**__magic_name__ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"Something is going wrong in the regression problem: intercepted {w.message}" ) loss.backward() @slow def UpperCamelCase__ ( self ) -> List[Any]: """simple docstring""" for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ : Optional[Any] = LevitModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowerCAmelCase_ ( ) -> Optional[int]: UpperCamelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowercase__ ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ ( self ) -> Tuple: """simple docstring""" return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCamelCase__ ( self ) -> Optional[int]: """simple docstring""" UpperCamelCase__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( __magic_name__ ) UpperCamelCase__ : List[Any] = self.default_image_processor UpperCamelCase__ : Any = prepare_img() UpperCamelCase__ : Optional[Any] = image_processor(images=__magic_name__, return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): UpperCamelCase__ : Dict = model(**__magic_name__ ) # verify the logits UpperCamelCase__ : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape, __magic_name__ ) UpperCamelCase__ : Dict = torch.tensor([1.0448, -0.3745, -1.8317] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3], __magic_name__, atol=1E-4 ) )
715
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
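A quick usage sketch; it assumes the class above is exposed as `transformers.UperNetConfig` and relies on the default-ResNet fallback path shown in `__init__`:

# Illustrative only.
from transformers import UperNetConfig

config = UperNetConfig(use_auxiliary_head=False)  # no backbone_config -> default ResNet backbone
print(config.backbone_config.model_type)          # "resnet"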
369
0
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
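A few hand-verified sanity checks for the helpers above:

# Quick checks (values verified by hand).
assert set_bit(0b1101, 1) == 0b1111     # 13 -> 15
assert clear_bit(0b1111, 1) == 0b1101   # 15 -> 13
assert flip_bit(0b1101, 1) == 0b1111    # toggle bit 1
assert is_bit_set(0b1010, 1) is True
assert get_bit(0b1010, 0) == 0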
422
def get_set_bits_count(number: int) -> int:
    if not isinstance(number, int) or number < 0:
        raise ValueError("Input must be a non-negative integer")
    count = 0
    while number:
        # This way we arrive at the next set bit (next 1) instead of looping
        # through each bit and checking for 1s; hence the loop won't run 32
        # times, it will only run the number of `1` times.
        number &= number - 1
        count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
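Why `number &= number - 1` clears exactly one set bit: subtracting 1 flips the lowest set bit to 0 and every zero below it to 1, so the AND erases precisely that bit. A worked trace for 13:

# 13 = 0b1101
# 13 & 12 -> 0b1101 & 0b1100 = 0b1100 (12), count = 1
# 12 & 11 -> 0b1100 & 0b1011 = 0b1000 (8),  count = 2
#  8 &  7 -> 0b1000 & 0b0111 = 0b0000 (0),  count = 3
assert get_set_bits_count(13) == 3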
422
1
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the distinct simple paths from (row, col) to the bottom-right cell,
    moving in the four cardinal directions and avoiding blocked cells (value 1)."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
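A small usage example: in a fully open 2x2 grid there are exactly two simple paths from the top-left to the bottom-right corner (right-then-down and down-then-right):

grid = [[0, 0], [0, 0]]
assert depth_first_search(grid, 0, 0, set()) == 2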
700
import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPTaTokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers from transformers.tokenization_utils import Trie sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class __A( unittest.TestCase ): def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = mock.Mock() _UpperCamelCase = 500 _UpperCamelCase = {} _UpperCamelCase = HTTPError _UpperCamelCase = {} # Download this model to make sure it's in the cache. _UpperCamelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''', return_value=A ) as mock_head: _UpperCamelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = mock.Mock() _UpperCamelCase = 500 _UpperCamelCase = {} _UpperCamelCase = HTTPError _UpperCamelCase = {} # Download this model to make sure it's in the cache. _UpperCamelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''', return_value=A ) as mock_head: _UpperCamelCase = GPTaTokenizerFast.from_pretrained('''gpt2''' ) # This check we did call the fake head request mock_head.assert_called() def _UpperCamelCase ( self ): """simple docstring""" try: _UpperCamelCase = tempfile.mktemp() with open(A, '''wb''' ) as f: http_get('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''', A ) _UpperCamelCase = AlbertTokenizer.from_pretrained(A ) finally: os.remove(A ) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. if os.path.isfile('''tokenizer.json''' ): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. return try: with open('''tokenizer.json''', '''wb''' ) as f: http_get('''https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json''', A ) _UpperCamelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1000 ) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. 
finally: os.remove('''tokenizer.json''' ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = AlbertTokenizer.from_pretrained('''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''' ) @is_staging_test class __A( unittest.TestCase ): __A = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def _UpperCamelCase ( cls ): """simple docstring""" _UpperCamelCase = TOKEN HfFolder.save_token(A ) @classmethod def _UpperCamelCase ( cls ): """simple docstring""" try: delete_repo(token=cls._token, repo_id='''test-tokenizer''' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='''valid_org/test-tokenizer-org''' ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id='''test-dynamic-tokenizer''' ) except HTTPError: pass def _UpperCamelCase ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = os.path.join(A, '''vocab.txt''' ) with open(A, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) _UpperCamelCase = BertTokenizer(A ) tokenizer.push_to_hub('''test-tokenizer''', use_auth_token=self._token ) _UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id='''test-tokenizer''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(A, repo_id='''test-tokenizer''', push_to_hub=A, use_auth_token=self._token ) _UpperCamelCase = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) def _UpperCamelCase ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = os.path.join(A, '''vocab.txt''' ) with open(A, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) _UpperCamelCase = BertTokenizer(A ) tokenizer.push_to_hub('''valid_org/test-tokenizer-org''', use_auth_token=self._token ) _UpperCamelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) # Reset repo delete_repo(token=self._token, repo_id='''valid_org/test-tokenizer-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained( A, repo_id='''valid_org/test-tokenizer-org''', push_to_hub=A, use_auth_token=self._token ) _UpperCamelCase = BertTokenizer.from_pretrained('''valid_org/test-tokenizer-org''' ) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab ) @require_tokenizers def _UpperCamelCase ( self ): """simple docstring""" CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = os.path.join(A, '''vocab.txt''' ) with open(A, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) _UpperCamelCase = CustomTokenizer(A ) # No fast custom tokenizer tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token ) _UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=A ) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' ) # Fast and slow custom 
tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: _UpperCamelCase = os.path.join(A, '''vocab.txt''' ) with open(A, '''w''', encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) _UpperCamelCase = BertTokenizerFast.from_pretrained(A ) bert_tokenizer.save_pretrained(A ) _UpperCamelCase = CustomTokenizerFast.from_pretrained(A ) tokenizer.push_to_hub('''test-dynamic-tokenizer''', use_auth_token=self._token ) _UpperCamelCase = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=A ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizerFast''' ) _UpperCamelCase = AutoTokenizer.from_pretrained( F'''{USER}/test-dynamic-tokenizer''', use_fast=A, trust_remote_code=A ) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, '''CustomTokenizer''' ) class __A( unittest.TestCase ): def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() trie.add('''Hello 友達''' ) self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) trie.add('''Hello''' ) trie.data self.assertEqual(trie.data, {'''H''': {'''e''': {'''l''': {'''l''': {'''o''': {'''''': 1, ''' ''': {'''友''': {'''達''': {'''''': 1}}}}}}}}} ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS] This is a extra_id_100'''] ) trie.add('''[CLS]''' ) trie.add('''extra_id_1''' ) trie.add('''extra_id_100''' ) self.assertEqual(trie.split('''[CLS] This is a extra_id_100''' ), ['''[CLS]''', ''' This is a ''', '''extra_id_100'''] ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() trie.add('''A''' ) self.assertEqual(trie.split('''ABC''' ), ['''A''', '''BC'''] ) self.assertEqual(trie.split('''BCA''' ), ['''BC''', '''A'''] ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() trie.add('''TOKEN]''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() trie.add('''A''' ) trie.add('''P''' ) trie.add('''[SPECIAL_TOKEN]''' ) self.assertEqual(trie.split('''This is something [SPECIAL_TOKEN]''' ), ['''This is something ''', '''[SPECIAL_TOKEN]'''] ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() trie.add('''AB''' ) trie.add('''B''' ) trie.add('''C''' ) self.assertEqual(trie.split('''ABC''' ), ['''AB''', '''C'''] ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() trie.add('''ABC''' ) trie.add('''B''' ) trie.add('''CD''' ) self.assertEqual(trie.split('''ABCD''' ), ['''ABC''', '''D'''] ) def _UpperCamelCase ( self ): """simple docstring""" _UpperCamelCase = Trie() _UpperCamelCase = trie.cut_text('''ABC''', [0, 0, 2, 1, 2, 3] ) self.assertEqual(A, ['''AB''', '''C'''] )
105
0
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """Solves the multi-process interleaved print problem by locking this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
54
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Tuple = { "post_extract_proj": "feature_projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.upsample.0": "encoder.upsample.projection", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: '''simple docstring''' for attribute in key.split(""".""" ): lowercase_ = getattr(__lowerCAmelCase , __lowerCAmelCase ) if weight_type is not None: lowercase_ = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape else: lowercase_ = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase_ = value elif weight_type == "weight_g": lowercase_ = value elif weight_type == "weight_v": lowercase_ = value elif weight_type == "bias": lowercase_ = value else: lowercase_ = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]: '''simple docstring''' lowercase_ = [] lowercase_ = fairseq_model.state_dict() lowercase_ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase_ = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) lowercase_ = True else: for key, mapped_key in MAPPING.items(): lowercase_ = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: lowercase_ = True if "*" in mapped_key: lowercase_ = name.split(__lowerCAmelCase )[0].split(""".""" )[-2] lowercase_ = mapped_key.replace("""*""" , __lowerCAmelCase ) if "weight_g" in name: lowercase_ = """weight_g""" elif "weight_v" in name: lowercase_ = """weight_v""" elif "weight" in name: lowercase_ = """weight""" elif "bias" in name: lowercase_ = """bias""" else: lowercase_ = None set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) continue if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> str: '''simple docstring''' lowercase_ = full_name.split("""conv_layers.""" )[-1] lowercase_ = name.split(""".""" ) lowercase_ = int(items[0] ) lowercase_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase_ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) lowercase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase_ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ = SEWConfig() if is_finetuned: lowercase_ = model.wav_encoder.wav_model.cfg else: lowercase_ = model.cfg lowercase_ = fs_config.conv_bias lowercase_ = eval(fs_config.conv_feature_layers ) lowercase_ = [x[0] for x in conv_layers] lowercase_ = [x[1] for x in conv_layers] lowercase_ = [x[2] for x in conv_layers] lowercase_ = """gelu""" lowercase_ = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" lowercase_ = 0.0 lowercase_ = fs_config.activation_fn.name lowercase_ = fs_config.encoder_embed_dim lowercase_ = 0.02 lowercase_ = fs_config.encoder_ffn_embed_dim lowercase_ = 1E-5 lowercase_ = fs_config.encoder_layerdrop lowercase_ = fs_config.encoder_attention_heads lowercase_ = fs_config.conv_pos_groups lowercase_ = fs_config.conv_pos lowercase_ = len(__lowerCAmelCase ) lowercase_ = fs_config.encoder_layers lowercase_ = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: lowercase_ = model.cfg lowercase_ = fs_config.final_dropout lowercase_ = fs_config.layerdrop lowercase_ = fs_config.activation_dropout lowercase_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 lowercase_ = fs_config.attention_dropout lowercase_ = fs_config.dropout_input lowercase_ = fs_config.dropout lowercase_ = fs_config.mask_channel_length lowercase_ = fs_config.mask_channel_prob lowercase_ = fs_config.mask_length lowercase_ = fs_config.mask_prob lowercase_ = """Wav2Vec2FeatureExtractor""" lowercase_ = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=True ) -> Union[str, Any]: '''simple docstring''' if is_finetuned: lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: lowercase_ = SEWConfig.from_pretrained(__lowerCAmelCase ) else: lowercase_ = convert_config(model[0] , __lowerCAmelCase ) lowercase_ = model[0].eval() lowercase_ = True if config.feat_extract_norm == """layer""" else False lowercase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , ) if is_finetuned: if dict_path: lowercase_ = Dictionary.load(__lowerCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq lowercase_ = target_dict.pad_index lowercase_ = target_dict.bos_index lowercase_ = target_dict.pad_index lowercase_ = target_dict.bos_index lowercase_ = target_dict.eos_index lowercase_ = len(target_dict.symbols ) lowercase_ = 
os.path.join(__lowerCAmelCase , """vocab.json""" ) if not os.path.isdir(__lowerCAmelCase ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCAmelCase ) ) return os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase ) with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , __lowerCAmelCase ) lowercase_ = WavaVecaCTCTokenizer( __lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCAmelCase , ) lowercase_ = WavaVecaProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase ) processor.save_pretrained(__lowerCAmelCase ) lowercase_ = SEWForCTC(__lowerCAmelCase ) else: lowercase_ = SEWModel(__lowerCAmelCase ) feature_extractor.save_pretrained(__lowerCAmelCase ) recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) hf_model.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCAmelCase : int = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) UpperCAmelCase : str = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
567
0
"""simple docstring""" import numpy class _UpperCAmelCase : def __init__( self , lowercase_ , lowercase_ ) -> None: UpperCAmelCase = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. UpperCAmelCase = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. UpperCAmelCase = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. UpperCAmelCase = numpy.random.rand(3 , 1 ) # Real output values provided. UpperCAmelCase = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. UpperCAmelCase = numpy.zeros(output_array.shape ) def a_ ( self ) -> numpy.ndarray: UpperCAmelCase = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. UpperCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. UpperCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def a_ ( self ) -> None: UpperCAmelCase = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) UpperCAmelCase = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) UpperCAmelCase = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> None: for iteration in range(1 , iterations + 1 ): UpperCAmelCase = self.feedforward() self.back_propagation() if give_loss: UpperCAmelCase = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F"Iteration {iteration} Loss: {loss}" ) def a_ ( self , lowercase_ ) -> int: UpperCAmelCase = input_arr 
UpperCAmelCase = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) UpperCAmelCase = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) UpperCAmelCase = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def lowercase__ ( lowerCAmelCase : numpy.ndarray ) -> numpy.ndarray: """simple docstring""" return 1 / (1 + numpy.exp(-value )) def lowercase__ ( lowerCAmelCase : numpy.ndarray ) -> numpy.ndarray: """simple docstring""" return (value) * (1 - (value)) def lowercase__ ( ) -> int: """simple docstring""" UpperCAmelCase = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. UpperCAmelCase = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. UpperCAmelCase = TwoHiddenLayerNeuralNetwork( input_array=lowerCAmelCase , output_array=lowerCAmelCase ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=lowerCAmelCase , iterations=10 , give_loss=lowerCAmelCase ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
183
"""simple docstring""" import sys import turtle def lowercase__ ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] ) -> tuple[float, float]: """simple docstring""" return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowercase__ ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : int , ) -> None: """simple docstring""" my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 ) triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 ) triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( '''Correct format for using this script: ''' '''python fractals.py <int:depth_for_fractal>''' ) SCREAMING_SNAKE_CASE_ = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('''red''') SCREAMING_SNAKE_CASE_ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
183
1
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction with the Euclidean algorithm.
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
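A short cross-check against the standard library: fractions.Fraction also reduces with a gcd, so both should agree for terminating decimals.

from fractions import Fraction

num, den = decimal_to_fraction(6.25)   # numerator 625 / denominator 100, gcd 25
assert Fraction(num, den) == Fraction("6.25") == Fraction(25, 4)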
89
from __future__ import annotations

from math import pi, sqrt


def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
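A worked example of the formula f = 1 / (2π·sqrt(L·C)) using the function above: with L = 10 mH and C = 100 nF, L·C = 1e-9, so f ≈ 5032.9 Hz.

label, freq = resonant_frequency(inductance=10e-3, capacitance=100e-9)
print(label, round(freq, 1))  # Resonant frequency 5032.9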
418
0
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __UpperCAmelCase = data_utils.TransfoXLTokenizer __UpperCAmelCase = data_utils.TransfoXLCorpus __UpperCAmelCase = data_utils __UpperCAmelCase = data_utils def lowercase__ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str] ) -> Optional[int]: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowerCAmelCase__ , "rb" ) as fp: a__ : str = pickle.load(lowerCAmelCase__ , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) a__ : Optional[Any] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"Save vocabulary to {pytorch_vocab_dump_path}" ) a__ : Dict = corpus.vocab.__dict__ torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) a__ : List[str] = corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , lowerCAmelCase__ ) a__ : Optional[int] = pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"Save dataset to {pytorch_dataset_dump_path}" ) torch.save(lowerCAmelCase__ , lowerCAmelCase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model a__ : List[str] = os.path.abspath(lowerCAmelCase__ ) a__ : int = os.path.abspath(lowerCAmelCase__ ) print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." ) # Initialise PyTorch model if transfo_xl_config_file == "": a__ : str = TransfoXLConfig() else: a__ : Tuple = TransfoXLConfig.from_json_file(lowerCAmelCase__ ) print(F"Building PyTorch model from configuration: {config}" ) a__ : List[str] = TransfoXLLMHeadModel(lowerCAmelCase__ ) a__ : str = load_tf_weights_in_transfo_xl(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # Save pytorch-model a__ : Union[str, Any] = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) a__ : Any = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) print(F"Save PyTorch model to {os.path.abspath(lowerCAmelCase__ )}" ) torch.save(model.state_dict() , lowerCAmelCase__ ) print(F"Save configuration file to {os.path.abspath(lowerCAmelCase__ )}" ) with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) __UpperCAmelCase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
251
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class __UpperCAmelCase ( _UpperCamelCase ): def __init__( self : int , *a_ : List[str] , **a_ : Any ) -> None: '''simple docstring''' warnings.warn( "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use VideoMAEImageProcessor instead." , a_ , ) super().__init__(*a_ , **a_ )
251
1
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _SCREAMING_SNAKE_CASE = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def __lowerCamelCase ( __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : str ) -> Tuple: for attribute in key.split(""".""" ): snake_case = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) if weight_type is not None: snake_case = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape else: snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case = value elif weight_type == "weight_g": snake_case = value elif weight_type == "weight_v": snake_case = value elif weight_type == "bias": snake_case = value else: snake_case = value logger.info(F'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __lowerCamelCase ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[str] ) -> Optional[int]: snake_case = [] snake_case = fairseq_model.state_dict() snake_case = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): snake_case = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == """group""" , ) snake_case = True else: for key, mapped_key in MAPPING.items(): snake_case = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: if "layer_norm_for_extract" in name and (".".join(name.split(""".""" )[:-1] ) != key): # special case since naming is very similar continue snake_case = True if "*" in mapped_key: snake_case = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2] snake_case = mapped_key.replace("""*""" , UpperCAmelCase__ ) if "weight_g" in name: snake_case = """weight_g""" elif "weight_v" in name: snake_case = """weight_v""" elif "bias" in name: snake_case = """bias""" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case = """weight""" else: snake_case = None set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) continue if not is_used: unused_weights.append(UpperCAmelCase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Any ) -> Dict: snake_case = full_name.split("""conv_layers.""" )[-1] snake_case = name.split(""".""" ) snake_case = int(items[0] ) snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(UpperCAmelCase__ ) 
@torch.no_grad() def __lowerCamelCase ( __lowerCAmelCase : Any , __lowerCAmelCase : List[str] , __lowerCAmelCase : Dict=None , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Optional[Any]=True ) -> List[Any]: if config_path is not None: snake_case = UniSpeechSatConfig.from_pretrained(UpperCAmelCase__ ) else: snake_case = UniSpeechSatConfig() snake_case = """""" if is_finetuned: snake_case = UniSpeechSatForCTC(UpperCAmelCase__ ) else: snake_case = UniSpeechSatForPreTraining(UpperCAmelCase__ ) snake_case , snake_case , snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) snake_case = model[0].eval() recursively_load_weights(UpperCAmelCase__ , UpperCAmelCase__ ) hf_wavavec.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
369
import os from argparse import ArgumentParser, Namespace from ..data import SingleSentenceClassificationProcessor as Processor from ..pipelines import TextClassificationPipeline from ..utils import is_tf_available, is_torch_available, logging from . import BaseTransformersCLICommand if not is_tf_available() and not is_torch_available(): raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training') # TF training parameters a = False a = False def UpperCAmelCase_ ( UpperCAmelCase__ ): return TrainCommand(UpperCAmelCase__ ) class UpperCamelCase__ ( __magic_name__ ): @staticmethod def UpperCAmelCase__ ( UpperCamelCase__ : ArgumentParser ): '''simple docstring''' lowercase_ = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" ) train_parser.add_argument( """--train_data""" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , ) train_parser.add_argument( """--column_label""" , type=UpperCamelCase__ , default=0 , help="""Column of the dataset csv file with example labels.""" ) train_parser.add_argument( """--column_text""" , type=UpperCamelCase__ , default=1 , help="""Column of the dataset csv file with example texts.""" ) train_parser.add_argument( """--column_id""" , type=UpperCamelCase__ , default=2 , help="""Column of the dataset csv file with example ids.""" ) train_parser.add_argument( """--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" ) train_parser.add_argument("""--validation_data""" , type=UpperCamelCase__ , default="""""" , help="""path to validation dataset.""" ) train_parser.add_argument( """--validation_split""" , type=UpperCamelCase__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , ) train_parser.add_argument("""--output""" , type=UpperCamelCase__ , default="""./""" , help="""path to saved the trained model.""" ) train_parser.add_argument( """--task""" , type=UpperCamelCase__ , default="""text_classification""" , help="""Task to train the model on.""" ) train_parser.add_argument( """--model""" , type=UpperCamelCase__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" ) train_parser.add_argument("""--train_batch_size""" , type=UpperCamelCase__ , default=32 , help="""Batch size for training.""" ) train_parser.add_argument("""--valid_batch_size""" , type=UpperCamelCase__ , default=64 , help="""Batch size for validation.""" ) train_parser.add_argument("""--learning_rate""" , type=UpperCamelCase__ , default=3e-5 , help="""Learning rate.""" ) train_parser.add_argument("""--adam_epsilon""" , type=UpperCamelCase__ , default=1e-08 , help="""Epsilon for Adam optimizer.""" ) train_parser.set_defaults(func=UpperCamelCase__ ) def __init__( self : Union[str, Any] , UpperCamelCase__ : Namespace ): '''simple docstring''' lowercase_ = logging.get_logger("""transformers-cli/training""" ) lowercase_ = """tf""" if is_tf_available() else """torch""" os.makedirs(args.output , exist_ok=UpperCamelCase__ ) lowercase_ = args.output lowercase_ = args.column_label lowercase_ = args.column_text lowercase_ = args.column_id self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' ) if args.task == "text_classification": lowercase_ = TextClassificationPipeline.from_pretrained(args.model ) elif args.task == "token_classification": raise 
NotImplementedError elif args.task == "question_answering": raise NotImplementedError self.logger.info(F'''Loading dataset from {args.train_data}''' ) lowercase_ = Processor.create_from_csv( args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowercase_ = None if args.validation_data: self.logger.info(F'''Loading validation dataset from {args.validation_data}''' ) lowercase_ = Processor.create_from_csv( args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , ) lowercase_ = args.validation_split lowercase_ = args.train_batch_size lowercase_ = args.valid_batch_size lowercase_ = args.learning_rate lowercase_ = args.adam_epsilon def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' if self.framework == "tf": return self.run_tf() return self.run_torch() def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' raise NotImplementedError def UpperCAmelCase__ ( self : Optional[Any] ): '''simple docstring''' self.pipeline.fit( self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , ) # Save trained pipeline self.pipeline.save_pretrained(self.output )
412
0
class __UpperCAmelCase : def __init__( self: Optional[int] , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Optional[Any] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = graph self._normalize_graph(UpperCAmelCase_ , UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = len(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = None def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Any ): '''simple docstring''' if sources is int: _SCREAMING_SNAKE_CASE = [sources] if sinks is int: _SCREAMING_SNAKE_CASE = [sinks] if len(UpperCAmelCase_ ) == 0 or len(UpperCAmelCase_ ) == 0: return _SCREAMING_SNAKE_CASE = sources[0] _SCREAMING_SNAKE_CASE = sinks[0] # make fake vertex if there are more # than one source or sink if len(UpperCAmelCase_ ) > 1 or len(UpperCAmelCase_ ) > 1: _SCREAMING_SNAKE_CASE = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _SCREAMING_SNAKE_CASE = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _SCREAMING_SNAKE_CASE = max_input_flow _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _SCREAMING_SNAKE_CASE = max_input_flow _SCREAMING_SNAKE_CASE = size - 1 def UpperCamelCase ( self: Union[str, Any] ): '''simple docstring''' if self.maximum_flow_algorithm is None: raise Exception("""You need to set maximum flow algorithm before.""" ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[int] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = algorithm(self ) class __UpperCAmelCase : def __init__( self: Optional[Any] , UpperCAmelCase_: str ): '''simple docstring''' _SCREAMING_SNAKE_CASE = flow_network _SCREAMING_SNAKE_CASE = flow_network.verticesCount _SCREAMING_SNAKE_CASE = flow_network.sourceIndex _SCREAMING_SNAKE_CASE = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _SCREAMING_SNAKE_CASE = flow_network.graph _SCREAMING_SNAKE_CASE = False def UpperCamelCase ( self: List[str] ): '''simple docstring''' if not self.executed: self._algorithm() _SCREAMING_SNAKE_CASE = True def UpperCamelCase ( self: Dict ): '''simple docstring''' pass class __UpperCAmelCase (_UpperCAmelCase ): def __init__( self: Union[str, Any] , UpperCAmelCase_: int ): '''simple docstring''' super().__init__(UpperCAmelCase_ ) # use this to save your result _SCREAMING_SNAKE_CASE = -1 def UpperCamelCase ( self: Tuple ): '''simple docstring''' if not self.executed: raise Exception("""You should execute algorithm before using its result!""" ) return self.maximum_flow class __UpperCAmelCase (_UpperCAmelCase ): def __init__( self: List[str] , UpperCAmelCase_: Optional[Any] ): '''simple docstring''' super().__init__(UpperCAmelCase_ ) _SCREAMING_SNAKE_CASE = [[0] * self.verticies_count for i in range(self.verticies_count )] _SCREAMING_SNAKE_CASE = [0] * self.verticies_count _SCREAMING_SNAKE_CASE = [0] * self.verticies_count def UpperCamelCase ( self: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth 
self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _SCREAMING_SNAKE_CASE = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _SCREAMING_SNAKE_CASE = 0 while i < len(UpperCAmelCase_ ): _SCREAMING_SNAKE_CASE = vertices_list[i] _SCREAMING_SNAKE_CASE = self.heights[vertex_index] self.process_vertex(UpperCAmelCase_ ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase_ ) ) _SCREAMING_SNAKE_CASE = 0 else: i += 1 _SCREAMING_SNAKE_CASE = sum(self.preflow[self.source_index] ) def UpperCamelCase ( self: str , UpperCAmelCase_: int ): '''simple docstring''' while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(UpperCAmelCase_ , UpperCAmelCase_ ) self.relabel(UpperCAmelCase_ ) def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def UpperCamelCase ( self: Optional[Any] , UpperCAmelCase_: List[str] ): '''simple docstring''' _SCREAMING_SNAKE_CASE = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _SCREAMING_SNAKE_CASE = self.heights[to_index] if min_height is not None: _SCREAMING_SNAKE_CASE = min_height + 1 if __name__ == "__main__": UpperCamelCase = [0] UpperCamelCase = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] UpperCamelCase = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network UpperCamelCase = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate UpperCamelCase = flow_network.find_maximum_flow() print(f"maximum flow is {maximum_flow}")
569
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch UpperCamelCase = '''sshleifer/bart-tiny-random''' UpperCamelCase = '''patrickvonplaten/t5-tiny-random''' @require_torch class __UpperCAmelCase (unittest.TestCase ): @cached_property def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' return AutoConfig.from_pretrained(UpperCAmelCase_ ) def UpperCamelCase ( self: int ): '''simple docstring''' _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def UpperCamelCase ( self: Optional[int] ): '''simple docstring''' _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase_ ) def UpperCamelCase ( self: Any ): '''simple docstring''' _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase_ ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def UpperCamelCase ( self: Dict ): '''simple docstring''' _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE = create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def UpperCamelCase ( self: List[Any] ): '''simple docstring''' with self.assertRaises(UpperCAmelCase_ ): create_student_by_copying_alternating_layers(UpperCAmelCase_ , tempfile.mkdtemp() , e=UpperCAmelCase_ , d=UpperCAmelCase_ )
569
1
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    def test_sorted(self):
        # kp.calc_profit takes (profit, weight, max_weight) and returns the
        # maximum profit that fits in the knapsack.
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # A negative max_weight should raise a ValueError.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        # Any negative weight value should raise a ValueError.
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        # Any negative profit value should raise a ValueError.
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # A max_weight of zero should raise a ValueError.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # Profit and weight lists of different lengths should raise a ValueError.
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
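The tests above imply a particular calc_profit interface. A hedged sketch of what they exercise, assuming the usual greedy fractional-knapsack approach; this is illustrative, not the repository's exact source.

def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    # Validation messages match the regexes asserted in the tests above.
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # Take items in decreasing profit-per-unit-weight order, splitting the last one.
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        take = min(w, capacity)
        total += p * (take / w)
        capacity -= take
        if capacity == 0:
            break
    return total

# With the test data (total weight 42 <= 100), every item fits: total profit 210.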
89
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    # Apply fun to obj, returning (result, None) on success or (None, exception).
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    # Run every operation against both HashMap and a plain dict and compare.
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
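A small demonstration of the operation-tuple pattern used above, run against a plain dict so it needs nothing beyond the helpers already defined; it shows the (result, exception) contract _run_operation relies on.

ops = [_set("k", 1), _get("k"), _del("k"), _get("k")]
d = {}
for fun, *args in ops:
    res, err = _run_operation(d, fun, *args)
    print(fun.__name__, args, "->", res, err)
# setitem returns (None, None); the final getitem returns (None, KeyError('k')).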
60
0
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ =['image_processor', 'tokenizer'] lowerCamelCase__ ='ViTImageProcessor' lowerCamelCase__ =('CLIPTokenizer', 'CLIPTokenizerFast') def __init__(self , a_=None , a_=None , **a_ ): '''simple docstring''' __snake_case : Tuple = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , a_ , ) __snake_case : Optional[Any] = kwargs.pop('''feature_extractor''' ) __snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(a_ , a_ ) def __call__(self , a_=None , a_=None , a_=None , a_=None , **a_ ): '''simple docstring''' if text is None and visual_prompt is None and images is None: raise ValueError('''You have to specify either text, visual prompt or images.''' ) if text is not None and visual_prompt is not None: raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' ) if text is not None: __snake_case : int = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if visual_prompt is not None: __snake_case : Optional[Any] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if images is not None: __snake_case : int = self.image_processor(a_ , return_tensors=a_ , **a_ ) if visual_prompt is not None and images is not None: __snake_case : Any = { '''pixel_values''': image_features.pixel_values, '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding elif text is not None and images is not None: __snake_case : Tuple = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: __snake_case : Optional[int] = { '''conditional_pixel_values''': prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.batch_decode(*a_ , **a_ ) def SCREAMING_SNAKE_CASE (self , *a_ , **a_ ): '''simple docstring''' return self.tokenizer.decode(*a_ , **a_ ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a_ , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a_ , ) return self.image_processor
229
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Optional[Any] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='sew-d' def __init__(self , a_=32 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_=2 , a_=5_12 , a_=2_56 , a_=True , a_=True , a_=("p2c", "c2p") , a_="layer_norm" , a_="gelu_python" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.1 , a_=0.02 , a_=1E-7 , a_=1E-5 , a_="group" , a_="gelu" , a_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , a_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a_=False , a_=1_28 , a_=16 , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_="mean" , a_=False , a_=False , a_=2_56 , a_=0 , a_=1 , a_=2 , **a_ , ): '''simple docstring''' super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ ) __snake_case : Any = hidden_size __snake_case : Tuple = feat_extract_norm __snake_case : int = feat_extract_activation __snake_case : List[str] = list(a_ ) __snake_case : Optional[Any] = list(a_ ) __snake_case : List[str] = list(a_ ) __snake_case : List[str] = conv_bias __snake_case : Dict = num_conv_pos_embeddings __snake_case : str = num_conv_pos_embedding_groups __snake_case : int = len(self.conv_dim ) __snake_case : List[Any] = num_hidden_layers __snake_case : List[Any] = intermediate_size __snake_case : Dict = squeeze_factor __snake_case : Optional[int] = max_position_embeddings __snake_case : List[Any] = position_buckets __snake_case : Union[str, Any] = share_att_key __snake_case : Tuple = relative_attention __snake_case : str = norm_rel_ebd __snake_case : Tuple = list(a_ ) __snake_case : Optional[int] = hidden_act __snake_case : int = num_attention_heads __snake_case : Optional[Any] = hidden_dropout __snake_case : Union[str, Any] = attention_dropout __snake_case : Any = activation_dropout __snake_case : Tuple = feat_proj_dropout __snake_case : str = final_dropout __snake_case : str = layer_norm_eps __snake_case : Tuple = feature_layer_norm_eps __snake_case : Tuple = initializer_range __snake_case : int = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __snake_case : Union[str, Any] = apply_spec_augment __snake_case : str = mask_time_prob __snake_case : Optional[Any] = mask_time_length __snake_case : List[Any] = mask_time_min_masks __snake_case : str = mask_feature_prob __snake_case : List[str] = mask_feature_length __snake_case : Optional[int] = mask_feature_min_masks # ctc loss __snake_case : Union[str, Any] = ctc_loss_reduction __snake_case : Optional[Any] = ctc_zero_infinity # sequence classification __snake_case : 
str = use_weighted_layer_sum __snake_case : Any = classifier_proj_size @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
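A short instantiation sketch for the configuration above; the class corresponds to transformers' SEWDConfig, and the derived stride product is assumed to be exposed as the inputs_to_logits_ratio property shown at the end of the class.

from transformers import SEWDConfig

config = SEWDConfig()  # all defaults from the signature above
print(config.num_feat_extract_layers)  # 13 convolutional feature-extractor layers
print(config.inputs_to_logits_ratio)   # 320 = 5 * 2**6, the product of the conv strides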
229
1
from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the denoising loop.
        image = torch.randn(
            (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            ),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        # The extra string marks this pipeline as the locally loaded test fixture.
        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
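A minimal smoke test of the pipeline above, assuming diffusers' UNet2DModel and DDPMScheduler; the tiny randomly initialised UNet only exercises the plumbing, so the generated image is noise.

from diffusers import DDPMScheduler, UNet2DModel

# Tiny random UNet purely to exercise the pipeline; real use would load
# trained weights via UNet2DModel.from_pretrained(...).
unet = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=1,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
scheduler = DDPMScheduler(num_train_timesteps=1000)
pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
output, marker = pipeline(batch_size=1, num_inference_steps=5)
print(len(output.images), marker)  # 1 "This is a local test"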
326
'''simple docstring''' import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json lowercase__ : Dict = "sshleifer/mar_enro_6_3_student" class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' def lowerCAmelCase__ ( self : Dict ) ->List[str]: super().setUp() UpperCAmelCase_ = cached_path( '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=UpperCAmelCase__ , ) UpperCAmelCase_ = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k""" @slow @require_torch_gpu def lowerCAmelCase__ ( self : Dict ) ->List[str]: MarianMTModel.from_pretrained(UpperCAmelCase__ ) @slow @require_torch_gpu def lowerCAmelCase__ ( self : int ) ->Dict: UpperCAmelCase_ = { '''$MAX_LEN''': 64, '''$BS''': 64, '''$GAS''': 1, '''$ENRO_DIR''': self.data_dir, '''facebook/mbart-large-cc25''': MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", '''--learning_rate=3e-5''': '''--learning_rate 3e-4''', '''--num_train_epochs 6''': '''--num_train_epochs 1''', } # Clean up bash script UpperCAmelCase_ = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip() UpperCAmelCase_ = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase_ = bash_script.replace(UpperCAmelCase__ , str(UpperCAmelCase__ ) ) UpperCAmelCase_ = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") UpperCAmelCase_ = f""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split() # XXX: args.gpus > 1 : handle multi_gpu in the future UpperCAmelCase_ = ['''finetune.py'''] + bash_script.split() + args with patch.object(UpperCAmelCase__ , '''argv''' , UpperCAmelCase__ ): UpperCAmelCase_ = argparse.ArgumentParser() UpperCAmelCase_ = pl.Trainer.add_argparse_args(UpperCAmelCase__ ) UpperCAmelCase_ = SummarizationModule.add_model_specific_args(UpperCAmelCase__ , os.getcwd() ) UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = main(UpperCAmelCase__ ) # Check metrics UpperCAmelCase_ = load_json(model.metrics_save_path ) UpperCAmelCase_ = metrics['''val'''][0] UpperCAmelCase_ = metrics['''val'''][-1] self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) ) assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , UpperCAmelCase__ ) self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 ) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 ) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 ) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 ) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase_ = os.listdir(UpperCAmelCase__ ) UpperCAmelCase_ = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase_ = os.path.join(args.output_dir , UpperCAmelCase__ ) UpperCAmelCase_ = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) UpperCAmelCase_ = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase_ = {os.path.basename(UpperCAmelCase__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1 class lowerCamelCase ( lowerCamelCase ): '''simple docstring''' @timeout_decorator.timeout(600 ) @slow @require_torch_gpu def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict: UpperCAmelCase_ = f"""{self.test_file_dir_str}/test_data/wmt_en_ro""" UpperCAmelCase_ = { '''--fp16_opt_level=O1''': '''''', '''$MAX_LEN''': 128, '''$BS''': 16, '''$GAS''': 1, '''$ENRO_DIR''': data_dir, '''$m''': '''sshleifer/student_marian_en_ro_6_1''', '''val_check_interval=0.25''': '''val_check_interval=1.0''', } # Clean up bash script UpperCAmelCase_ = ( (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip() ) UpperCAmelCase_ = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' ) UpperCAmelCase_ = bash_script.replace('''--fp16 ''' , ''' ''' ) for k, v in env_vars_to_replace.items(): UpperCAmelCase_ = bash_script.replace(UpperCAmelCase__ , str(UpperCAmelCase__ ) ) UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = bash_script.replace('''--fp16''' , '''''' ) UpperCAmelCase_ = 6 UpperCAmelCase_ = ( ['''distillation.py'''] + bash_script.split() + [ f"""--output_dir={output_dir}""", '''--gpus=1''', '''--learning_rate=1e-3''', f"""--num_train_epochs={epochs}""", '''--warmup_steps=10''', '''--val_check_interval=1.0''', '''--do_predict''', ] ) with patch.object(UpperCAmelCase__ , '''argv''' , UpperCAmelCase__ ): UpperCAmelCase_ = argparse.ArgumentParser() UpperCAmelCase_ = pl.Trainer.add_argparse_args(UpperCAmelCase__ ) UpperCAmelCase_ = SummarizationDistiller.add_model_specific_args(UpperCAmelCase__ , os.getcwd() ) UpperCAmelCase_ = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu UpperCAmelCase_ = distill_main(UpperCAmelCase__ ) # Check metrics UpperCAmelCase_ = load_json(model.metrics_save_path ) UpperCAmelCase_ = metrics['''val'''][0] UpperCAmelCase_ = metrics['''val'''][-1] assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. 
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , UpperCAmelCase__ ) # check lightning ckpt can be loaded and has a reasonable statedict UpperCAmelCase_ = os.listdir(UpperCAmelCase__ ) UpperCAmelCase_ = [x for x in contents if x.endswith('''.ckpt''' )][0] UpperCAmelCase_ = os.path.join(args.output_dir , UpperCAmelCase__ ) UpperCAmelCase_ = torch.load(UpperCAmelCase__ , map_location='''cpu''' ) UpperCAmelCase_ = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight''' assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: UpperCAmelCase_ = {os.path.basename(UpperCAmelCase__ ) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics['''test'''] ) == 1
390
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __magic_name__ : def __init__( self , snake_case , ) -> int: '''simple docstring''' _UpperCAmelCase : Dict =parent _UpperCAmelCase : Dict =1_3 _UpperCAmelCase : Optional[Any] =7 _UpperCAmelCase : Union[str, Any] =True _UpperCAmelCase : str =True _UpperCAmelCase : Optional[Any] =True _UpperCAmelCase : List[Any] =9_9 _UpperCAmelCase : Union[str, Any] =3_2 _UpperCAmelCase : str =2 _UpperCAmelCase : Optional[int] =4 _UpperCAmelCase : Optional[int] =3_7 _UpperCAmelCase : Any ='gelu' _UpperCAmelCase : Tuple =0.1 _UpperCAmelCase : List[Any] =0.1 _UpperCAmelCase : int =5_1_2 _UpperCAmelCase : Tuple =1_6 _UpperCAmelCase : Any =2 _UpperCAmelCase : Tuple =0.02 _UpperCAmelCase : Union[str, Any] =3 _UpperCAmelCase : int =4 _UpperCAmelCase : Optional[Any] =None def lowerCAmelCase ( self) -> Any: '''simple docstring''' _UpperCAmelCase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCAmelCase : Tuple =None if self.use_input_mask: _UpperCAmelCase : Tuple =random_attention_mask([self.batch_size, self.seq_length]) _UpperCAmelCase : List[str] =None _UpperCAmelCase : Tuple =None _UpperCAmelCase : Dict =None if self.use_labels: _UpperCAmelCase : Any =ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCAmelCase : int =ids_tensor([self.batch_size] , self.num_choices) _UpperCAmelCase : str =EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' ( _UpperCAmelCase ) : Any =self.prepare_config_and_inputs() _UpperCAmelCase : Dict =True _UpperCAmelCase : Optional[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) _UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> str: '''simple docstring''' _UpperCAmelCase : int =TFEsmModel(config=_lowercase) _UpperCAmelCase : List[str] ={'input_ids': input_ids, 'attention_mask': input_mask} _UpperCAmelCase : Tuple =model(_lowercase) _UpperCAmelCase : Dict =[input_ids, input_mask] _UpperCAmelCase : 
List[Any] =model(_lowercase) _UpperCAmelCase : Tuple =model(_lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> List[Any]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =True _UpperCAmelCase : Tuple =TFEsmModel(config=_lowercase) _UpperCAmelCase : int ={ 'input_ids': input_ids, 'attention_mask': input_mask, 'encoder_hidden_states': encoder_hidden_states, 'encoder_attention_mask': encoder_attention_mask, } _UpperCAmelCase : Optional[int] =model(_lowercase) _UpperCAmelCase : str =[input_ids, input_mask] _UpperCAmelCase : Union[str, Any] =model(_lowercase , encoder_hidden_states=_lowercase) # Also check the case where encoder outputs are not passed _UpperCAmelCase : List[Any] =model(_lowercase , attention_mask=_lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Tuple: '''simple docstring''' _UpperCAmelCase : Optional[Any] =TFEsmForMaskedLM(config=_lowercase) _UpperCAmelCase : List[Any] =model([input_ids, input_mask]) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =self.num_labels _UpperCAmelCase : Dict =TFEsmForTokenClassification(config=_lowercase) _UpperCAmelCase : str ={'input_ids': input_ids, 'attention_mask': input_mask} _UpperCAmelCase : int =model(_lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def lowerCAmelCase ( self) -> Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =self.prepare_config_and_inputs() ( _UpperCAmelCase ) : List[str] =config_and_inputs _UpperCAmelCase : Optional[int] ={'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class __magic_name__ ( UpperCAmelCase_ ,UpperCAmelCase_ ,unittest.TestCase ): UpperCAmelCase =( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) UpperCAmelCase =( { 'feature-extraction': TFEsmModel, 'fill-mask': TFEsmForMaskedLM, 'text-classification': TFEsmForSequenceClassification, 'token-classification': TFEsmForTokenClassification, 'zero-shot': TFEsmForSequenceClassification, } if is_tf_available() else {} ) UpperCAmelCase =False UpperCAmelCase =False def lowerCAmelCase ( self) -> Any: '''simple docstring''' _UpperCAmelCase : Optional[int] =TFEsmModelTester(self) _UpperCAmelCase : Any =ConfigTester(self , config_class=_lowercase , hidden_size=3_7) def lowerCAmelCase ( self) -> str: '''simple docstring''' self.config_tester.run_common_tests() def lowerCAmelCase ( self) -> Any: '''simple docstring''' _UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase) def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_lowercase) def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Dict 
=self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowercase) def lowerCAmelCase ( self) -> str: '''simple docstring''' _UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowercase) @slow def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : List[str] =TFEsmModel.from_pretrained(_lowercase) self.assertIsNotNone(_lowercase) @unittest.skip('Protein models do not support embedding resizing.') def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' pass @unittest.skip('Protein models do not support embedding resizing.') def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' pass def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase : str =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : List[Any] =model_class(_lowercase) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer _UpperCAmelCase : Any =model.get_bias() assert isinstance(_lowercase , _lowercase) for k, v in name.items(): assert isinstance(_lowercase , tf.Variable) else: _UpperCAmelCase : Union[str, Any] =model.get_output_embeddings() assert x is None _UpperCAmelCase : Tuple =model.get_bias() assert name is None @require_tf class __magic_name__ ( unittest.TestCase ): @slow def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Optional[int] =TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D') _UpperCAmelCase : Any =tf.constant([[0, 1, 2, 3, 4, 5]]) _UpperCAmelCase : Tuple =model(_lowercase)[0] _UpperCAmelCase : int =[1, 6, 3_3] self.assertEqual(list(output.numpy().shape) , _lowercase) # compare the actual values for a slice. _UpperCAmelCase : Union[str, Any] =tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ]) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2)) @slow def lowerCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : Dict =TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D') _UpperCAmelCase : int =tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]]) _UpperCAmelCase : str =model(_lowercase)[0] # compare the actual values for a slice. _UpperCAmelCase : Optional[int] =tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ]) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4))
707
'''simple docstring''' import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ): UpperCAmelCase =None UpperCAmelCase =BloomTokenizerFast UpperCAmelCase =BloomTokenizerFast UpperCAmelCase =True UpperCAmelCase =False UpperCAmelCase ="tokenizer_file" UpperCAmelCase ={"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def lowerCAmelCase ( self) -> Any: '''simple docstring''' super().setUp() _UpperCAmelCase : Union[str, Any] =BloomTokenizerFast.from_pretrained('bigscience/tokenizer') tokenizer.save_pretrained(self.tmpdirname) def lowerCAmelCase ( self , **snake_case) -> List[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case) def lowerCAmelCase ( self) -> int: '''simple docstring''' _UpperCAmelCase : Optional[Any] =self.get_rust_tokenizer() _UpperCAmelCase : Any =['The quick brown fox</s>', 'jumps over the lazy dog</s>'] _UpperCAmelCase : int =[[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]] _UpperCAmelCase : Tuple =tokenizer.batch_encode_plus(snake_case)['input_ids'] self.assertListEqual(snake_case , snake_case) _UpperCAmelCase : Any =tokenizer.batch_decode(snake_case) self.assertListEqual(snake_case , snake_case) def lowerCAmelCase ( self , snake_case=6) -> str: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): _UpperCAmelCase : Optional[int] =self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input _UpperCAmelCase : Dict ='This is a simple input' _UpperCAmelCase : str =['This is a simple input 1', 'This is a simple input 2'] _UpperCAmelCase : List[Any] =('This is a simple input', 'This is a pair') _UpperCAmelCase : Union[str, Any] =[ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(snake_case , max_length=snake_case) tokenizer_r.encode_plus(snake_case , max_length=snake_case) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case) tokenizer_r.encode(snake_case , max_length=snake_case) tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding') _UpperCAmelCase : Tuple =None # Hotfixing padding = None self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length') # Simple input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length') # Simple input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) # Pair input self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length') # Pair input self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length') # Pair input self.assertRaises( snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , ) def 
lowerCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCAmelCase : Dict =self.get_rust_tokenizer() _UpperCAmelCase : List[Any] =load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case) _UpperCAmelCase : List[Any] =next(iter(snake_case))['premise'] # pick one example _UpperCAmelCase : Union[str, Any] =list(sample_data.values()) _UpperCAmelCase : Dict =list(map(tokenizer.encode , snake_case)) _UpperCAmelCase : Optional[Any] =[tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case) for x in output_tokens] self.assertListEqual(snake_case , snake_case) def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' # The test has to be overridden because BLOOM uses ALiBi positional embeddings that do not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positional embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
331
0
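The row above pins TFEsm and Bloom behavior by comparing a small slice of the model output against hard-coded values under a numeric tolerance, rather than asserting on the full tensor. A minimal sketch of that slice-comparison pattern, using a deterministic stand-in for the real model (fake_model and its sizes are illustrative, not part of the dataset):

import numpy as np

def fake_model(input_ids: np.ndarray) -> np.ndarray:
    # Deterministic stand-in for `model(input_ids)[0]`: an embedding lookup
    # producing a (batch, seq_len, hidden) array, seeded for reproducibility.
    rng = np.random.default_rng(0)
    table = rng.standard_normal((100, 8))
    return table[input_ids]

output = fake_model(np.array([[0, 1, 2, 3, 4, 5]]))
assert list(output.shape) == [1, 6, 8]
# Pin down a small corner of the output with a tolerance instead of the full tensor.
expected_slice = fake_model(np.array([[0, 1, 2]]))[:, :3, :3]
assert np.allclose(output[:, :3, :3], expected_slice, atol=1e-4)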
'''simple docstring''' import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class UpperCAmelCase_ ( __A , unittest.TestCase ): """simple docstring""" UpperCamelCase_ = BertJapaneseTokenizer UpperCamelCase_ = False UpperCamelCase_ = True def A__ ( self : Optional[int] ) -> Any: '''simple docstring''' super().setUp() lowercase : Optional[int] =[ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] lowercase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def A__ ( self : List[str] , UpperCAmelCase : Any ) -> Tuple: '''simple docstring''' lowercase : Optional[int] ='''こんにちは、世界。 \nこんばんは、世界。''' lowercase : Dict ='''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def A__ ( self : Tuple , UpperCAmelCase : List[str] ) -> int: '''simple docstring''' lowercase , lowercase : List[str] =self.get_input_output_texts(UpperCAmelCase ) lowercase : Optional[Any] =tokenizer.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) lowercase : Optional[Any] =tokenizer.decode(UpperCAmelCase , clean_up_tokenization_spaces=UpperCAmelCase ) return text, ids def A__ ( self : List[str] ) -> List[Any]: '''simple docstring''' pass # TODO add if relevant def A__ ( self : Tuple ) -> List[str]: '''simple docstring''' pass # TODO add if relevant def A__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' pass # TODO add if relevant def A__ ( self : List[Any] ) -> Tuple: '''simple docstring''' lowercase : str =self.tokenizer_class(self.vocab_file ) lowercase : Dict =tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def A__ ( self : Union[str, Any] ) -> Any: '''simple docstring''' lowercase : Optional[int] =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(UpperCAmelCase ) lowercase : Dict ='''こんにちは、世界。\nこんばんは、世界。''' lowercase : Optional[Any] =tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowercase : Union[str, Any] =os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase , '''wb''' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , '''rb''' ) as handle: lowercase : Tuple =pickle.load(UpperCAmelCase ) lowercase : Dict =tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def A__ ( self : List[str] ) -> List[Any]: 
'''simple docstring''' lowercase : int =MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def A__ ( self : List[str] ) -> Any: '''simple docstring''' try: lowercase : Tuple =MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def A__ ( self : Optional[Any] ) -> str: '''simple docstring''' try: lowercase : Optional[int] =MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def A__ ( self : Dict ) -> Tuple: '''simple docstring''' lowercase : Tuple =MecabTokenizer(do_lower_case=UpperCAmelCase , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def A__ ( self : str ) -> str: '''simple docstring''' try: lowercase : Any =MecabTokenizer( do_lower_case=UpperCAmelCase , normalize_text=UpperCAmelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def A__ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' lowercase : List[str] =MecabTokenizer(normalize_text=UpperCAmelCase , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def A__ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' lowercase : int =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(UpperCAmelCase ) lowercase : Optional[int] ='''こんにちは、世界。\nこんばんは、世界。''' lowercase : Any =tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowercase : Union[str, Any] =os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase , '''wb''' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , '''rb''' ) as handle: lowercase : Tuple =pickle.load(UpperCAmelCase ) lowercase : Tuple =tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_sudachi def A__ ( self : Optional[int] ) -> int: '''simple docstring''' lowercase : str =SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', 
'''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def A__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' lowercase : Any =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def A__ ( self : Dict ) -> Optional[Any]: '''simple docstring''' lowercase : int =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def A__ ( self : Optional[int] ) -> str: '''simple docstring''' lowercase : Optional[Any] =SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def A__ ( self : Dict ) -> Dict: '''simple docstring''' lowercase : Optional[int] =SudachiTokenizer(do_lower_case=UpperCAmelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def A__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' lowercase : int =SudachiTokenizer(normalize_text=UpperCAmelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def A__ ( self : List[str] ) -> List[str]: '''simple docstring''' lowercase : int =SudachiTokenizer(trim_whitespace=UpperCAmelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def A__ ( self : Optional[int] ) -> Any: '''simple docstring''' lowercase : int =self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(UpperCAmelCase ) lowercase : Dict ='''こんにちは、世界。\nこんばんは、世界。''' lowercase : List[Any] =tokenizer.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) lowercase : Any =os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(UpperCAmelCase , '''wb''' ) as handle: pickle.dump(UpperCAmelCase , UpperCAmelCase ) with open(UpperCAmelCase , '''rb''' ) as handle: lowercase : Union[str, Any] =pickle.load(UpperCAmelCase ) lowercase : str =tokenizer_new.tokenize(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_jumanpp def A__ ( self : Any ) -> Any: '''simple docstring''' lowercase : Any =JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def A__ ( self : int ) -> int: '''simple docstring''' lowercase : Any 
=JumanppTokenizer(do_lower_case=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def A__ ( self : str ) -> Optional[Any]: '''simple docstring''' lowercase : List[str] =JumanppTokenizer(normalize_text=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def A__ ( self : str ) -> str: '''simple docstring''' lowercase : Any =JumanppTokenizer(trim_whitespace=UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def A__ ( self : List[str] ) -> Any: '''simple docstring''' lowercase : Union[str, Any] =JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def A__ ( self : Union[str, Any] ) -> int: '''simple docstring''' lowercase : int =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] lowercase : Optional[Any] ={} for i, token in enumerate(UpperCAmelCase ): lowercase : str =i lowercase : int =WordpieceTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def A__ ( self : Any ) -> Tuple: '''simple docstring''' lowercase : List[Any] =BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) lowercase : Tuple =tokenizer.subword_tokenizer lowercase : Union[str, Any] =subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(UpperCAmelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) lowercase : Optional[Any] =subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(UpperCAmelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def A__ ( self : str ) -> Optional[Any]: '''simple docstring''' lowercase : Optional[Any] =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) lowercase : Tuple =tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase ) lowercase : Tuple =tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase ) lowercase : Any =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) lowercase : Tuple =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class UpperCAmelCase_ ( __A , unittest.TestCase ): """simple docstring""" 
UpperCamelCase_ = BertJapaneseTokenizer UpperCamelCase_ = False def A__ ( self : str ) -> Optional[int]: '''simple docstring''' super().setUp() lowercase : Any =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowercase : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def A__ ( self : Dict , **UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **UpperCAmelCase ) def A__ ( self : Optional[int] , UpperCAmelCase : Optional[int] ) -> Optional[int]: '''simple docstring''' lowercase : int ='''こんにちは、世界。 \nこんばんは、世界。''' lowercase : Optional[int] ='''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def A__ ( self : Dict ) -> int: '''simple docstring''' pass # TODO add if relevant def A__ ( self : str ) -> str: '''simple docstring''' pass # TODO add if relevant def A__ ( self : Optional[int] ) -> Dict: '''simple docstring''' pass # TODO add if relevant def A__ ( self : Dict ) -> List[Any]: '''simple docstring''' lowercase : Dict =self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) lowercase : List[str] =tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( UpperCAmelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def A__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' lowercase : List[Any] =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] lowercase : List[Any] ={} for i, token in enumerate(UpperCAmelCase ): lowercase : Tuple =i lowercase : Dict =CharacterTokenizer(vocab=UpperCAmelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def A__ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' lowercase : str =self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) lowercase : Tuple =tokenizer.encode('''ありがとう。''' , add_special_tokens=UpperCAmelCase ) lowercase : List[str] =tokenizer.encode('''どういたしまして。''' , add_special_tokens=UpperCAmelCase ) lowercase : Any =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase ) lowercase : List[str] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def A__ ( self : Tuple ) -> List[Any]: '''simple docstring''' lowercase : Optional[int] ='''cl-tohoku/bert-base-japanese''' lowercase : Optional[Any] =AutoTokenizer.from_pretrained(UpperCAmelCase ) self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) class UpperCAmelCase_ ( unittest.TestCase 
): """simple docstring""" def A__ ( self : Optional[int] ) -> Any: '''simple docstring''' lowercase : str ='''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) lowercase : str ='''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
94
"""simple docstring""" import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 a_ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") a_ = get_tests_dir("""fixtures/vocab.json""") a_ = get_tests_dir("""fixtures""") class __snake_case ( unittest.TestCase ): """simple docstring""" _lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] def UpperCamelCase__( self ): '''simple docstring''' __A : Optional[Any] = 0 def UpperCamelCase__( self ): '''simple docstring''' __A : str = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A : str = WavaVecaConfig() __A : Dict = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' ) # save in new folder model_config.save_pretrained(__lowerCamelCase ) processor.save_pretrained(__lowerCamelCase ) __A : List[str] = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) ) copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , '''vocab.json''' ) ) __A : Union[str, Any] = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A : Union[str, Any] = WavaVecaFeatureExtractor() __A : Optional[Any] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) __A : Union[str, Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase ) # save in new folder processor.save_pretrained(__lowerCamelCase ) # drop `processor_class` in tokenizer with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''r''' ) as f: __A : Optional[int] = json.load(__lowerCamelCase ) config_dict.pop('''processor_class''' ) with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f: f.write(json.dumps(__lowerCamelCase ) ) __A : Tuple = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: 
__A : Optional[int] = WavaVecaFeatureExtractor() __A : List[str] = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' ) __A : List[Any] = WavaVecaProcessor(__lowerCamelCase , __lowerCamelCase ) # save in new folder processor.save_pretrained(__lowerCamelCase ) # drop `processor_class` in feature extractor with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''r''' ) as f: __A : int = json.load(__lowerCamelCase ) config_dict.pop('''processor_class''' ) with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f: f.write(json.dumps(__lowerCamelCase ) ) __A : Optional[int] = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __A : Optional[Any] = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' ) model_config.save_pretrained(__lowerCamelCase ) # copy relevant files copyfile(__lowerCamelCase , os.path.join(__lowerCamelCase , '''vocab.json''' ) ) # create emtpy sample processor with open(os.path.join(__lowerCamelCase , __lowerCamelCase ) , '''w''' ) as f: f.write('''{}''' ) __A : Union[str, Any] = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' with self.assertRaises(__lowerCamelCase ): __A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCamelCase ): __A : Dict = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) __A : List[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) __A : Union[str, Any] = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' ) __A : Optional[int] = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version __A : int = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase , use_fast=__lowerCamelCase ) __A : Optional[int] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def UpperCamelCase__( self ): '''simple docstring''' try: AutoConfig.register('''custom''' , __lowerCamelCase ) AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase ) AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase ) AutoProcessor.register(__lowerCamelCase , __lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase ): AutoProcessor.register(__lowerCamelCase , __lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API __A : str = CustomFeatureExtractor.from_pretrained(__lowerCamelCase ) with 
tempfile.TemporaryDirectory() as tmp_dir: __A : Optional[Any] = os.path.join(__lowerCamelCase , '''vocab.txt''' ) with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) __A : Dict = CustomTokenizer(__lowerCamelCase ) __A : Optional[Any] = CustomProcessor(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(__lowerCamelCase ) __A : List[str] = AutoProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__( self ): '''simple docstring''' class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = False class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = False class __snake_case ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _lowerCamelCase = """AutoFeatureExtractor""" _lowerCamelCase = """AutoTokenizer""" _lowerCamelCase = False try: AutoConfig.register('''custom''' , __lowerCamelCase ) AutoFeatureExtractor.register(__lowerCamelCase , __lowerCamelCase ) AutoTokenizer.register(__lowerCamelCase , slow_tokenizer_class=__lowerCamelCase ) AutoProcessor.register(__lowerCamelCase , __lowerCamelCase ) # If remote code is not set, the default is to use local classes. __A : Union[str, Any] = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. __A : int = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. 
__A : str = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__lowerCamelCase ) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase__( self ): '''simple docstring''' __A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' ) def UpperCamelCase__( self ): '''simple docstring''' __A : int = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' ) self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' ) @is_staging_test class __snake_case ( unittest.TestCase ): """simple docstring""" _lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def UpperCamelCase__( cls ): '''simple docstring''' __A : Optional[int] = TOKEN HfFolder.save_token(__lowerCamelCase ) @classmethod def UpperCamelCase__( cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' ) except HTTPError: pass def UpperCamelCase__( self ): '''simple docstring''' __A : Tuple = WavaVecaProcessor.from_pretrained(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__lowerCamelCase , '''test-processor''' ) , push_to_hub=__lowerCamelCase , use_auth_token=self._token ) __A : Tuple = WavaVecaProcessor.from_pretrained(F"""{USER}/test-processor""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def UpperCamelCase__( self ): '''simple docstring''' __A : Optional[int] = WavaVecaProcessor.from_pretrained(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(__lowerCamelCase , '''test-processor-org''' ) , push_to_hub=__lowerCamelCase , use_auth_token=self._token , organization='''valid_org''' , ) __A : int = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(__lowerCamelCase , getattr(new_processor.feature_extractor , __lowerCamelCase ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def UpperCamelCase__( self ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() __A : Any = CustomFeatureExtractor.from_pretrained(__lowerCamelCase ) with tempfile.TemporaryDirectory() 
as tmp_dir: __A : List[Any] = os.path.join(__lowerCamelCase , '''vocab.txt''' ) with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) ) __A : Optional[int] = CustomTokenizer(__lowerCamelCase ) __A : Any = CustomProcessor(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F"""{USER}/test-dynamic-processor""" , token=self._token ) __A : Tuple = Repository(__lowerCamelCase , clone_from=F"""{USER}/test-dynamic-processor""" , token=self._token ) processor.save_pretrained(__lowerCamelCase ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(__lowerCamelCase , '''tokenizer_config.json''' ) ) as f: __A : Tuple = json.load(__lowerCamelCase ) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_feature_extraction.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_tokenization.py''' ) ) ) self.assertTrue(os.path.isfile(os.path.join(__lowerCamelCase , '''custom_processing.py''' ) ) ) repo.push_to_hub() __A : Any = AutoProcessor.from_pretrained(F"""{USER}/test-dynamic-processor""" , trust_remote_code=__lowerCamelCase ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
177
0
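Several tests in the row above pickle a word tokenizer to tokenizer.bin, reload it, and assert the reloaded object tokenizes identically. A minimal sketch of that round-trip pattern with a toy tokenizer (WhitespaceTokenizer is a stand-in, not a class from the row):

import pickle

class WhitespaceTokenizer:
    """Toy stand-in for the word tokenizers pickled in the tests above."""
    def tokenize(self, text: str) -> list[str]:
        return text.split()

tokenizer = WhitespaceTokenizer()
data = pickle.dumps(tokenizer)       # serialize, as the tests do to tokenizer.bin
tokenizer_new = pickle.loads(data)   # reload
text = "konnichiwa sekai"
assert tokenizer.tokenize(text) == tokenizer_new.tokenize(text)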
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = """The Nymphenburg Palace is a beautiful palace in Munich!""" def __A ( a_ : str ,a_ : str ): lowerCAmelCase : List[str] = { "attention_cell": "multi_head", "num_layers": 4, "units": 1_0_2_4, "hidden_size": 7_6_8, "max_length": 5_1_2, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1_0_2_4, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1e-5, "token_type_vocab_size": 2, } lowerCAmelCase : Dict = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowerCAmelCase : str = BERTEncoder( attention_cell=predefined_args["attention_cell"] ,num_layers=predefined_args["num_layers"] ,units=predefined_args["units"] ,hidden_size=predefined_args["hidden_size"] ,max_length=predefined_args["max_length"] ,num_heads=predefined_args["num_heads"] ,scaled=predefined_args["scaled"] ,dropout=predefined_args["dropout"] ,output_attention=a_ ,output_all_encodings=a_ ,use_residual=predefined_args["use_residual"] ,activation=predefined_args.get("activation" ,"gelu" ) ,layer_norm_eps=predefined_args.get("layer_norm_eps" ,a_ ) ,) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowerCAmelCase : int = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab lowerCAmelCase : int = os.path.join(get_home_dir() ,"models" ) lowerCAmelCase : List[str] = _load_vocab(a_ ,a_ ,a_ ,cls=a_ ) lowerCAmelCase : List[str] = nlp.model.BERTModel( a_ ,len(a_ ) ,units=predefined_args["units"] ,embed_size=predefined_args["embed_size"] ,embed_dropout=predefined_args["embed_dropout"] ,word_embed=predefined_args["word_embed"] ,use_pooler=a_ ,use_token_type_embed=a_ ,token_type_vocab_size=predefined_args["token_type_vocab_size"] ,use_classifier=a_ ,use_decoder=a_ ,) original_bort.load_parameters(a_ ,cast_dtype=a_ ,ignore_extra=a_ ) lowerCAmelCase : Optional[Any] = original_bort._collect_params_with_prefix() # Build our config 🤗 lowerCAmelCase : Tuple = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.0_2, "intermediate_size": predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 
1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(a_ ), } lowerCAmelCase : Optional[Any] = BertConfig.from_dict(a_ ) lowerCAmelCase : Tuple = BertForMaskedLM(a_ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(a_ : Optional[Any] ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(a_ : Any ,a_ : Optional[Any] ): lowerCAmelCase : Any = hf_param.shape lowerCAmelCase : Any = to_torch(params[gluon_param] ) lowerCAmelCase : Tuple = gluon_param.shape assert ( shape_hf == shape_gluon ), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers''' return gluon_param lowerCAmelCase : Optional[int] = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight ,"word_embed.0.weight" ) lowerCAmelCase : str = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight ,"encoder.position_weight" ) lowerCAmelCase : List[str] = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias ,"encoder.layer_norm.beta" ) lowerCAmelCase : Dict = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight ,"encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion 
script, we just zero them out (Bort does not use them) lowerCAmelCase : Optional[Any] = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowerCAmelCase : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention lowerCAmelCase : BertSelfAttention = layer.attention.self lowerCAmelCase : List[str] = check_and_map_params( self_attn.key.bias.data ,f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' ) lowerCAmelCase : Union[str, Any] = check_and_map_params( self_attn.key.weight.data ,f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' ) lowerCAmelCase : Tuple = check_and_map_params( self_attn.query.bias.data ,f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' ) lowerCAmelCase : Dict = check_and_map_params( self_attn.query.weight.data ,f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' ) lowerCAmelCase : Optional[Any] = check_and_map_params( self_attn.value.bias.data ,f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' ) lowerCAmelCase : int = check_and_map_params( self_attn.value.weight.data ,f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' ) # self attention output lowerCAmelCase : BertSelfOutput = layer.attention.output lowerCAmelCase : str = check_and_map_params( self_output.dense.bias ,f'''encoder.transformer_cells.{i}.proj.bias''' ) lowerCAmelCase : List[Any] = check_and_map_params( self_output.dense.weight ,f'''encoder.transformer_cells.{i}.proj.weight''' ) lowerCAmelCase : str = check_and_map_params( self_output.LayerNorm.bias ,f'''encoder.transformer_cells.{i}.layer_norm.beta''' ) lowerCAmelCase : List[str] = check_and_map_params( self_output.LayerNorm.weight ,f'''encoder.transformer_cells.{i}.layer_norm.gamma''' ) # intermediate lowerCAmelCase : BertIntermediate = layer.intermediate lowerCAmelCase : int = check_and_map_params( intermediate.dense.bias ,f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' ) lowerCAmelCase : Union[str, Any] = check_and_map_params( intermediate.dense.weight ,f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' ) # output lowerCAmelCase : BertOutput = layer.output lowerCAmelCase : Tuple = check_and_map_params( bert_output.dense.bias ,f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' ) lowerCAmelCase : int = check_and_map_params( bert_output.dense.weight ,f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' ) lowerCAmelCase : str = check_and_map_params( bert_output.LayerNorm.bias ,f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' ) lowerCAmelCase : Union[str, Any] = check_and_map_params( bert_output.LayerNorm.weight ,f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowerCAmelCase : Union[str, Any] = RobertaTokenizer.from_pretrained("roberta-base" ) lowerCAmelCase : Union[str, Any] = tokenizer.encode_plus(a_ )["input_ids"] # Get gluon output lowerCAmelCase : List[Any] = mx.nd.array([input_ids] ) lowerCAmelCase : int = original_bort(inputs=a_ ,token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(a_ ) lowerCAmelCase : List[str] = BertModel.from_pretrained(a_ ) hf_bort_model.eval() lowerCAmelCase : Optional[int] = tokenizer.encode_plus(a_ ,return_tensors="pt" ) lowerCAmelCase : str = hf_bort_model(**a_ )[0] lowerCAmelCase : List[Any] = output_gluon[0].asnumpy() lowerCAmelCase : int = output_hf[0].detach().numpy() 
lowerCAmelCase : Dict = np.max(np.abs(hf_layer - gluon_layer ) ).item() lowerCAmelCase : Dict = np.allclose(a_ ,a_ ,atol=1e-3 ) if success: print("✔️ Both models output the same tensors" ) else: print("❌ Both models do **NOT** output the same tensors" ) print("Absolute difference is:" ,a_ ) if __name__ == "__main__": lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowerCAmelCase = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
551
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class lowerCamelCase ( unittest.TestCase ): def _lowerCamelCase ( self , a_ ): for model_result in results.values(): for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ): lowerCAmelCase : Tuple = model_result["result"][batch_size][sequence_length] self.assertIsNotNone(a_ ) def _lowerCamelCase ( self ): lowerCAmelCase : Optional[int] = "sshleifer/tiny-gpt2" lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(a_ ) lowerCAmelCase : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): lowerCAmelCase : Union[str, Any] = "sgugger/tiny-distilbert-classification" lowerCAmelCase : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , only_pretrain_model=a_ , ) lowerCAmelCase : Any = TensorFlowBenchmark(a_ ) lowerCAmelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): lowerCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2" lowerCAmelCase : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) lowerCAmelCase : List[str] = TensorFlowBenchmark(a_ ) lowerCAmelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): lowerCAmelCase : List[str] = "sshleifer/tiny-gpt2" lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(a_ ) lowerCAmelCase : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a_ , multi_process=a_ , ) lowerCAmelCase : Any = TensorFlowBenchmark(a_ , [config] ) lowerCAmelCase : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): lowerCAmelCase : int = "sshleifer/tiny-gpt2" lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(a_ ) lowerCAmelCase : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) lowerCAmelCase : Tuple = TensorFlowBenchmark(a_ , [config] ) lowerCAmelCase : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): lowerCAmelCase : Tuple = "sshleifer/tiny-gpt2" lowerCAmelCase : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) lowerCAmelCase : List[str] = 
TensorFlowBenchmark(a_ ) lowerCAmelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ): lowerCAmelCase : List[Any] = "sshleifer/tiny-gpt2" lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(a_ ) lowerCAmelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) lowerCAmelCase : Dict = TensorFlowBenchmark(a_ , [config] ) lowerCAmelCase : int = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _lowerCamelCase ( self ): lowerCAmelCase : str = "patrickvonplaten/t5-tiny-random" lowerCAmelCase : Tuple = AutoConfig.from_pretrained(a_ ) lowerCAmelCase : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a_ , ) lowerCAmelCase : List[str] = TensorFlowBenchmark(a_ , configs=[config] ) lowerCAmelCase : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." ) def _lowerCamelCase ( self ): lowerCAmelCase : str = "sshleifer/tiny-gpt2" lowerCAmelCase : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a_ , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a_ , multi_process=a_ , ) lowerCAmelCase : Any = TensorFlowBenchmark(a_ ) lowerCAmelCase : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _lowerCamelCase ( self ): lowerCAmelCase : Tuple = "sshleifer/tiny-gpt2" with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , save_to_csv=a_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a_ , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(a_ , "inf_mem.csv" ) , env_info_csv_file=os.path.join(a_ , "env.csv" ) , multi_process=a_ , ) lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(a_ ) benchmark.run() self.assertTrue(Path(os.path.join(a_ , "inf_time.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , "inf_mem.csv" ) ).exists() ) self.assertTrue(Path(os.path.join(a_ , "env.csv" ) ).exists() ) def _lowerCamelCase ( self ): lowerCAmelCase : Any = "sshleifer/tiny-gpt2" def _check_summary_is_not_empty(a_ ): self.assertTrue(hasattr(a_ , "sequential" ) ) self.assertTrue(hasattr(a_ , "cumulative" ) ) self.assertTrue(hasattr(a_ , "current" ) ) self.assertTrue(hasattr(a_ , "total" ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a_ , "log.txt" ) , log_print=a_ , trace_memory_line_by_line=a_ , eager_mode=a_ , multi_process=a_ , ) lowerCAmelCase : Optional[Any] = TensorFlowBenchmark(a_ ) lowerCAmelCase : Tuple = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a_ , "log.txt" ) ).exists() )
551
1
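The conversion script in the row above copies each Gluon parameter into the PyTorch model only after asserting that the shapes agree (check_and_map_params), then compares final outputs with np.allclose. A minimal sketch of the shape-checked mapping step, with numpy arrays standing in for the real MXNet and PyTorch tensors:

import numpy as np

def check_and_map(dst: np.ndarray, params: dict, gluon_name: str) -> np.ndarray:
    src = params[gluon_name]
    # Refuse to copy when shapes disagree, so a mismatched mapping fails loudly
    # instead of silently producing a broken checkpoint.
    assert dst.shape == src.shape, (
        f"{gluon_name}: source shape {src.shape} != destination shape {dst.shape}"
    )
    return src.copy()

source_params = {"encoder.layer_norm.gamma": np.ones(4)}
destination = np.zeros(4)
destination = check_and_map(destination, source_params, "encoder.layer_norm.gamma")
assert np.allclose(destination, 1.0)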
"""simple docstring""" from __future__ import annotations def __magic_name__ ( UpperCamelCase : List[str] , UpperCamelCase : Dict ) -> int: if len(UpperCamelCase ) <= 1 or n <= 1: return insert_next(UpperCamelCase , n - 1 ) rec_insertion_sort(UpperCamelCase , n - 1 ) def __magic_name__ ( UpperCamelCase : str , UpperCamelCase : int ) -> Optional[Any]: if index >= len(UpperCamelCase ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order a__ = ( collection[index], collection[index - 1], ) insert_next(UpperCamelCase , index + 1 ) if __name__ == "__main__": a : Union[str, Any] = input('Enter integers separated by spaces: ') a : Optional[Any] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
273
import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : int = LayoutLMTokenizer UpperCamelCase_ : str = LayoutLMTokenizerFast UpperCamelCase_ : Any = True UpperCamelCase_ : Optional[Any] = True def _A ( self : Any ): super().setUp() SCREAMING_SNAKE_CASE : Optional[Any] = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _A ( self : str , **UpperCAmelCase_ : Optional[int] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ ) def _A ( self : Tuple , UpperCAmelCase_ : List[str] ): SCREAMING_SNAKE_CASE : Any = "UNwant\u00E9d,running" SCREAMING_SNAKE_CASE : Union[str, Any] = "unwanted, running" return input_text, output_text def _A ( self : int ): SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(UpperCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [7, 4, 5, 10, 8, 9] ) def _A ( self : List[str] ): pass
62
0
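The sort in the row above is a recursive insertion sort, but the dump masks the function definitions (both are rendered as __magic_name__) while the call sites keep the names insert_next and rec_insertion_sort. A readable sketch of the same algorithm, with the names restored from those call sites:

def rec_insertion_sort(collection: list, n: int) -> None:
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)         # repair the inversion ending at index n - 1
    rec_insertion_sort(collection, n - 1)  # then recurse on the shorter prefix

def insert_next(collection: list, index: int) -> None:
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swap the adjacent out-of-order pair and keep pushing it rightward.
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)

numbers = [5, 3, 1, 4, 2]
rec_insertion_sort(numbers, len(numbers))
assert numbers == [1, 2, 3, 4, 5]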
"""simple docstring""" from typing import List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging A = logging.get_logger(__name__) A = { '''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''', } class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = '''autoformer''' __lowerCAmelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "student_t" , _UpperCAmelCase = "nll" , _UpperCAmelCase = 1 , _UpperCAmelCase = [1, 2, 3, 4, 5, 6, 7] , _UpperCAmelCase = True , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = 0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 64 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 2 , _UpperCAmelCase = 32 , _UpperCAmelCase = 32 , _UpperCAmelCase = "gelu" , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 0.1 , _UpperCAmelCase = 100 , _UpperCAmelCase = 0.0_2 , _UpperCAmelCase = True , _UpperCAmelCase=True , _UpperCAmelCase = 10 , _UpperCAmelCase = 25 , _UpperCAmelCase = 3 , **_UpperCAmelCase , ): # time series specific configuration __a : Optional[int] = prediction_length __a : Optional[int] = context_length if context_length is not None else prediction_length __a : Union[str, Any] = distribution_output __a : str = loss __a : Optional[Any] = input_size __a : str = num_time_features __a : Optional[Any] = lags_sequence __a : Any = scaling __a : Optional[Any] = num_dynamic_real_features __a : str = num_static_real_features __a : Any = num_static_categorical_features if cardinality is not None and num_static_categorical_features > 0: if len(_UpperCAmelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) __a : List[str] = cardinality else: __a : Any = [0] if embedding_dimension is not None and num_static_categorical_features > 0: if len(_UpperCAmelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) __a : Any = embedding_dimension else: __a : Tuple = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] __a : Union[str, Any] = num_parallel_samples # Transformer architecture configuration __a : Union[str, Any] = input_size * len(self.lags_sequence ) + self._number_of_features __a : List[Any] = d_model __a : List[str] = encoder_attention_heads __a : Union[str, Any] = decoder_attention_heads __a : Optional[int] = encoder_ffn_dim __a : List[str] = decoder_ffn_dim __a : List[str] = encoder_layers __a : Optional[int] = decoder_layers __a : Union[str, Any] = dropout __a : Union[str, Any] = attention_dropout __a : Optional[Any] = activation_dropout __a : List[str] = encoder_layerdrop __a : Any = decoder_layerdrop __a : Union[str, Any] = activation_function __a : Union[str, Any] = init_std __a : List[str] = use_cache # Autoformer __a : int = label_length __a : List[Any] = moving_average __a : str = autocorrelation_factor super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase ) @property def _lowerCamelCase ( self ): return ( sum(self.embedding_dimension ) + 
self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
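# A minimal usage sketch for the config above (an added illustration, not part of
# the original file): with the defaults, `feature_size` follows from the formula in
# `_number_of_features` as input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.
config = AutoformerConfig(prediction_length=24)
assert config.context_length == 24  # falls back to prediction_length
assert config.feature_size == 9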
712
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. A = abspath(join(dirname(dirname(dirname(__file__))), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __A ( a_ :Tuple) -> Dict: from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(a_) def __A ( a_ :Any) -> int: from transformers.testing_utils import pytest_terminal_summary_main __a : str = terminalreporter.config.getoption('''--make-reports''') if make_reports: pytest_terminal_summary_main(a_ , id=a_)
101
0
"""simple docstring""" import math from collections.abc import Iterator from itertools import takewhile def __snake_case ( _lowercase ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 ,int(math.sqrt(_lowercase ) + 1 ) ,6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __snake_case ( ): """simple docstring""" UpperCamelCase = 2 while True: if is_prime(_lowercase ): yield num num += 1 def __snake_case ( _lowercase = 200_0000 ): """simple docstring""" return sum(takewhile(lambda _lowercase : x < n ,prime_generator() ) ) if __name__ == "__main__": print(f'{solution() = }')
34
'''simple docstring''' import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class A : def __init__( self : List[str] , __a : Any , __a : int=9_9 , __a : Any=1_3 , __a : Tuple=7 , __a : Tuple=9 , __a : Tuple=True , __a : Union[str, Any]=True , __a : List[Any]=False , __a : Optional[Any]=3_2 , __a : str=5 , __a : Optional[int]=4 , __a : Union[str, Any]=3_7 , __a : List[str]=8 , __a : Optional[int]=0.1 , __a : List[str]=0.0_0_2 , __a : List[Any]=1 , __a : str=0 , __a : Dict=0 , __a : int=None , __a : List[Any]=None , ) -> Tuple: __UpperCAmelCase = parent __UpperCAmelCase = batch_size __UpperCAmelCase = encoder_seq_length __UpperCAmelCase = decoder_seq_length # For common tests __UpperCAmelCase = self.decoder_seq_length __UpperCAmelCase = is_training __UpperCAmelCase = use_attention_mask __UpperCAmelCase = use_labels __UpperCAmelCase = vocab_size __UpperCAmelCase = hidden_size __UpperCAmelCase = num_hidden_layers __UpperCAmelCase = num_attention_heads __UpperCAmelCase = d_ff __UpperCAmelCase = relative_attention_num_buckets __UpperCAmelCase = dropout_rate __UpperCAmelCase = initializer_factor __UpperCAmelCase = eos_token_id __UpperCAmelCase = pad_token_id __UpperCAmelCase = decoder_start_token_id __UpperCAmelCase = None __UpperCAmelCase = decoder_layers def snake_case__ ( self : Union[str, Any] ) -> int: return TaConfig.from_pretrained('''google/umt5-base''' ) def snake_case__ ( self : List[Any] , __a : List[str] , __a : str , __a : Optional[int] , __a : List[Any]=None , __a : List[Any]=None , __a : Any=None , __a : str=None , __a : Any=None , ) -> List[Any]: if attention_mask is None: __UpperCAmelCase = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__a ) if decoder_head_mask is None: __UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__a ) if cross_attn_head_mask is None: __UpperCAmelCase = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=__a ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def snake_case__ ( self : List[str] ) -> Dict: __UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results 
in # position_ids being off by num_pad_tokens in past input __UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 ) __UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 ) __UpperCAmelCase = self.get_config() __UpperCAmelCase = config.num_attention_heads __UpperCAmelCase = self.prepare_inputs_dict(__a , __a , __a ) return config, input_dict def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase = self.prepare_config_and_inputs() return config, inputs_dict def snake_case__ ( self : int ) -> Optional[int]: return TaConfig( vocab_size=1_6_6 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def snake_case__ ( self : Optional[int] ) -> Any: return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def snake_case__ ( self : int , __a : Any , __a : Union[str, Any] , __a : List[Any] , __a : Dict , __a : Optional[Any] , __a : int , ) -> List[Any]: __UpperCAmelCase = UMTaModel(config=__a ) model.to(__a ) model.eval() __UpperCAmelCase = model( input_ids=__a , decoder_input_ids=__a , attention_mask=__a , decoder_attention_mask=__a , ) __UpperCAmelCase = model(input_ids=__a , decoder_input_ids=__a ) __UpperCAmelCase = result.last_hidden_state __UpperCAmelCase = result.past_key_values __UpperCAmelCase = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__a ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def snake_case__ ( self : List[str] , __a : Any , __a : Tuple , __a : List[str] , __a : Optional[Any] , __a : Dict , __a : Any , ) -> Optional[Any]: __UpperCAmelCase = UMTaModel(config=__a ).get_decoder().to(__a ).eval() # first forward pass __UpperCAmelCase = model(__a , use_cache=__a ) __UpperCAmelCase = model(__a ) __UpperCAmelCase = model(__a , use_cache=__a ) self.parent.assertTrue(len(__a ) == len(__a ) ) self.parent.assertTrue(len(__a ) == len(__a ) + 1 ) __UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) 
__UpperCAmelCase = model(__a )['''last_hidden_state'''] __UpperCAmelCase = model(__a , past_key_values=__a )['''last_hidden_state'''] # select random slice __UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach() __UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) ) def snake_case__ ( self : List[Any] , __a : Union[str, Any] , __a : Dict , ) -> Optional[int]: __UpperCAmelCase = UMTaModel(config=__a ).to(__a ).half().eval() __UpperCAmelCase = model(**__a )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__a ).any().item() ) @require_torch class A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): a_ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) a_ = (UMTaForConditionalGeneration,) if is_torch_available() else () a_ = ( { '''conversational''': UMTaForConditionalGeneration, '''feature-extraction''': UMTaModel, '''summarization''': UMTaForConditionalGeneration, '''text2text-generation''': UMTaForConditionalGeneration, '''translation''': UMTaForConditionalGeneration, '''question-answering''': UMTaForQuestionAnswering, } if is_torch_available() else {} ) a_ = True a_ = False a_ = False a_ = True a_ = True # The small UMT5 model needs higher percentages for CPU/MP tests a_ = [0.8, 0.9] def snake_case__ ( self : Tuple ) -> Optional[int]: __UpperCAmelCase = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def snake_case__ ( self : str ) -> Optional[int]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() __UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__a ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__a , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , ) @unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' ) def snake_case__ ( self : Union[str, Any] ) -> List[str]: __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__a ) def snake_case__ ( self : List[Any] ) -> str: __UpperCAmelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] __UpperCAmelCase = self.model_tester.prepare_config_and_inputs() __UpperCAmelCase = config_and_inputs[0] __UpperCAmelCase = UMTaForConditionalGeneration(__a ).eval() model.to(__a ) __UpperCAmelCase = { '''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=__a ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ), } for attn_name, (name, mask) in zip(__a , head_masking.items() ): __UpperCAmelCase = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __UpperCAmelCase = torch.ones( config.num_decoder_layers , config.num_heads , device=__a ) __UpperCAmelCase = model.generate( config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=__a , return_dict_in_generate=__a , **__a , ) # We check the state of decoder_attentions and cross_attentions just from the 
last step __UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def snake_case__ ( self : Optional[int] ) -> int: pass @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def snake_case__ ( self : Any ) -> int: __UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=__a ).to(__a ) __UpperCAmelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=__a , legacy=__a ) __UpperCAmelCase = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] __UpperCAmelCase = tokenizer(__a , return_tensors='''pt''' , padding=__a ).input_ids # fmt: off __UpperCAmelCase = torch.tensor( [ [ 3_8_5_3_0, 2_1_0_7_0_3, 2_5_6_2_9_9, 1_4_1_0, 2_5_6_2_9_8, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_2_6, 3_2_1, 6_7_1, 2_5_9_2_2, 2_5_6_2_9_9, 2_7_4, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_4_6_0, 3_3_9, 3_1_2, 1_9_0_1_4, 1_0_6_2_0, 7_5_8, 2_5_6_2_9_9, 2_3_5_5,2_7_4, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_1_7, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 3_0_1, 2_5_6_2_9_8, 2_7_5, 1_1_9_9_8_3,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_2_0, 2_5_6_2_9_9, 1_4_8_6_9, 2_8_1, 2_2_3_4, 2_8_9, 2_2_7_5, 3_3_3,6_1_3_9_1, 2_8_9, 2_5_6_2_9_8, 5_4_3, 2_5_6_2_9_7, 1_6_8_7_1_4, 3_2_9, 2_5_6_2_9_6,2_7_4, 1], ] ) # fmt: on torch.testing.assert_allclose(__a , __a ) __UpperCAmelCase = model.generate(input_ids.to(__a ) ) __UpperCAmelCase = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] __UpperCAmelCase = tokenizer.batch_decode(__a ) self.assertEqual(__a , __a )
262
0
"""simple docstring""" import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version a = get_logger(__name__) class lowercase_ : '''simple docstring''' UpperCAmelCase : List[Any] = """dummy_data""" UpperCAmelCase : str = """datasets""" UpperCAmelCase : Tuple = False def __init__( self : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] = None , _UpperCAmelCase : int = False , _UpperCAmelCase : int = True , _UpperCAmelCase : Any = None , ): _A = 0 _A = dataset_name _A = cache_dir _A = use_local_dummy_data _A = config # download_callbacks take a single url as input _A = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root _A = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general _A = str(lowercase__ ) # to be downloaded _A = None _A = None @property def lowerCAmelCase_ ( self : List[Any] ): if self._dummy_file is None: _A = self.download_dummy_data() return self._dummy_file @property def lowerCAmelCase_ ( self : str ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('dummy' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('dummy' , self.version_name ) @property def lowerCAmelCase_ ( self : str ): return os.path.join(self.dummy_data_folder , 'dummy_data.zip' ) def lowerCAmelCase_ ( self : Tuple ): _A = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) _A = cached_path( lowercase__ , cache_dir=self.cache_dir , extract_compressed_file=lowercase__ , force_extract=lowercase__ ) return os.path.join(lowercase__ , self.dummy_file_name ) @property def lowerCAmelCase_ ( self : str ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def lowerCAmelCase_ ( self : int ): if self._bucket_url is None: _A = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '/' ) ) return self._bucket_url @property def lowerCAmelCase_ ( self : Any ): if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '/' ).split('/' )[:-1] ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Tuple , *_UpperCAmelCase : str ): if self.load_existing_dummy_data: # dummy data is downloaded and tested _A = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned _A = self.dummy_file_name # special case when data_url is a dict if isinstance(lowercase__ , lowercase__ ): return self.create_dummy_data_dict(lowercase__ , lowercase__ ) elif isinstance(lowercase__ , (list, tuple) ): return self.create_dummy_data_list(lowercase__ , lowercase__ ) else: return self.create_dummy_data_single(lowercase__ , lowercase__ ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Any , *_UpperCAmelCase : Any ): return self.download_and_extract(lowercase__ ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ): return self.download_and_extract(lowercase__ ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[str] , *_UpperCAmelCase : Any , **_UpperCAmelCase : List[Any] ): return path def lowerCAmelCase_ ( self : List[str] ): return {} def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] ): _A = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowercase__ , lowercase__ ): for single_url in single_urls: download_callback(lowercase__ ) else: _A = single_urls download_callback(lowercase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowercase__ , lowercase__ ): _A = [os.path.join(lowercase__ , urllib.parse.quote_plus(Path(lowercase__ ).name ) ) for x in single_urls] else: _A = single_urls _A = os.path.join(lowercase__ , urllib.parse.quote_plus(Path(lowercase__ ).name ) ) _A = value # make sure that values are unique if all(isinstance(lowercase__ , lowercase__ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique _A = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str ): _A = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one _A = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}' , lowercase__ ) ) for url in data_url ) _A = all( url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): _A = [data_url[0]] * len(lowercase__ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowercase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(lowercase__ , urllib.parse.quote_plus(single_url.split('/' )[-1] ) ) dummy_data_list.append(lowercase__ ) return dummy_data_list def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ): for download_callback in self.download_callbacks: download_callback(lowercase__ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus _A = os.path.join(lowercase__ 
, urllib.parse.quote_plus(data_url.split('/' )[-1] ) ) if os.path.exists(lowercase__ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def lowerCAmelCase_ ( self : Optional[Any] ): pass def lowerCAmelCase_ ( self : Any ): pass def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int ): def _iter_archive_members(_UpperCAmelCase : Union[str, Any] ): # this preserves the order of the members inside the ZIP archive _A = Path(self.dummy_file ).parent _A = path.relative_to(lowercase__ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: _A = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowercase__ ) _A = Path(lowercase__ ) _A = _iter_archive_members(lowercase__ ) if self.use_local_dummy_data else path.rglob('*' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('.', '__') ): yield file_path.relative_to(lowercase__ ).as_posix(), file_path.open('rb' ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Tuple ): if not isinstance(lowercase__ , lowercase__ ): _A = [paths] for path in paths: if os.path.isfile(lowercase__ ): if os.path.basename(lowercase__ ).startswith(('.', '__') ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowercase__ ): if os.path.basename(lowercase__ ).startswith(('.', '__') ): continue dirnames.sort() for filename in sorted(lowercase__ ): if filename.startswith(('.', '__') ): continue yield os.path.join(lowercase__ , lowercase__ )
703
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
505
0
import inspect
import unittest


class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
305
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
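# Worked examples for the automorphic-number check above (added for illustration):
# 76 is automorphic because 76 ** 2 == 5776 ends in 76, while 7 is not (7 ** 2 == 49).
assert is_automorphic_number(5)      # 25 ends in 5
assert is_automorphic_number(76)     # 5776 ends in 76
assert not is_automorphic_number(7)  # 49 does not end in 7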
305
1
import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def A__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] ): if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = torch.permute(__A , (0, 2, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__A ): # linear layer lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) lowerCamelCase__ = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: lowerCamelCase__ = flax_key_tuple[:-1] + ("""weight""",) return flax_key_tuple, flax_tensor def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : Tuple , __lowerCAmelCase : Union[str, Any] ): if "metadata" in layer: lowerCamelCase__ = layer.split("""metadata""" ) lowerCamelCase__ = """""".join(split_layer[0] )[:-1] lowerCamelCase__ = [tuple(("""metadata""" + split_layer[1]).split("""/""" ) )] elif "kvstore" in layer: lowerCamelCase__ = layer.split("""kvstore""" ) lowerCamelCase__ = """""".join(split_layer[0] )[:-1] lowerCamelCase__ = [tuple(("""kvstore""" + split_layer[1]).split("""/""" ) )] else: lowerCamelCase__ = layer.split("""/""" ) lowerCamelCase__ = """/""".join(split_layer[:-1] ) lowerCamelCase__ = (split_layer[-1],) if "kvstore/path" in layer: lowerCamelCase__ = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: lowerCamelCase__ = """file""" else: lowerCamelCase__ = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def A__ ( __lowerCAmelCase : Any , __lowerCAmelCase : List[Any] ): lowerCamelCase__ = rename_keys(__A ) lowerCamelCase__ = {} for k, v in current_block.items(): lowerCamelCase__ = v lowerCamelCase__ = new_current_block torch.save(__A , __A ) def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str = WEIGHTS_NAME ): lowerCamelCase__ = convert_file_size_to_int(__A ) lowerCamelCase__ = [] lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = 0 os.makedirs(__A , exist_ok=__A ) with gfile.GFile(switch_checkpoint_path + """/checkpoint""" , """rb""" ) as fp: lowerCamelCase__ = serialization.msgpack_restore(fp.read() )["""optimizer"""]["""target"""] lowerCamelCase__ = flatten_dict(__A , sep="""/""" ) lowerCamelCase__ = {} for layer in checkpoint_info.keys(): lowerCamelCase__ = get_key_and_tensorstore_dict( __A , __A , __A ) if curr_real_layer_name in all_layers: lowerCamelCase__ = content else: lowerCamelCase__ = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file lowerCamelCase__ = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result() lowerCamelCase__ = torch.tensor(__A ) lowerCamelCase__ = raw_weights.numel() * dtype_byte_size(raw_weights.dtype ) # use the renaming pattern from the small conversion scripts lowerCamelCase__ = rename_base_flax_keys(tuple(key.split("""/""" ) ) , __A ) lowerCamelCase__ = """/""".join(__A ) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: lowerCamelCase__ = os.path.join( __A , weights_name.replace(""".bin""" , F'''-{len(__A )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__A , __A ) sharded_state_dicts.append(current_block.keys() ) del current_block lowerCamelCase__ = {} lowerCamelCase__ = 0 lowerCamelCase__ = raw_weights.to(getattr(__A , __A ) ) current_block_size += weight_size total_size += weight_size # Add the last block lowerCamelCase__ = os.path.join(__A , weights_name.replace(""".bin""" , F'''-{len(__A )+1:05d}-of-???.bin''' ) ) rename_and_save_block(__A , __A ) sharded_state_dicts.append(current_block.keys() ) # If we only have one shard, we return it if len(__A ) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index lowerCamelCase__ = {} lowerCamelCase__ = {} for idx, shard in enumerate(__A ): lowerCamelCase__ = weights_name.replace( """.bin""" , F'''-{idx+1:05d}-of-{len(__A ):05d}.bin''' ) # len(sharded_state_dicts):05d} lowerCamelCase__ = os.path.join(__A , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(__A , os.path.join(__A , __A ) ) lowerCamelCase__ = shard for key in shard: lowerCamelCase__ = shard_file # Add the metadata lowerCamelCase__ = {"""total_size""": total_size} lowerCamelCase__ = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(__A , __A ) , """w""" , encoding="""utf-8""" ) as f: lowerCamelCase__ = json.dumps(__A , indent=2 , sort_keys=__A ) + """\n""" f.write(__A ) return metadata, index if __name__ == "__main__": UpperCamelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600', type=str, required=False, help='Path to a directory containing a folder per layer. Follows the original Google format.', ) parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size') parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model') parser.add_argument( '--pytorch_dump_folder_path', default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted', type=str, required=False, help='Path to the output pytorch model.', ) UpperCamelCase : str = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def A__ ( ): from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer lowerCamelCase__ = SwitchTransformersConfig.from_pretrained("""google/switch-base-8""" ) config.save_pretrained("""/home/arthur_huggingface_co/transformers/switch_converted""" ) lowerCamelCase__ = SwitchTransformersForConditionalGeneration.from_pretrained( """/home/arthur_huggingface_co/transformers/switch_converted""" , device_map="""auto""" ) lowerCamelCase__ = TaTokenizer.from_pretrained("""t5-small""" ) lowerCamelCase__ = """A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""" lowerCamelCase__ = tokenizer(__A , return_tensors="""pt""" ).input_ids lowerCamelCase__ = model.generate(__A , decoder_start_token_id=0 ) print(tokenizer.decode(out[0] ) )
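# A minimal, self-contained sketch of the size-budget sharding idea used in
# `shard_on_the_fly` above: greedily pack tensors into a block until adding the
# next one would exceed the byte budget, then start a new block. The helper name
# and the `max_bytes` parameter are illustrative, not part of the original script.
import torch


def shard_by_size(state_dict: dict, max_bytes: int) -> list:
    shards, current, current_size = [], {}, 0
    for name, tensor in state_dict.items():
        size = tensor.numel() * tensor.element_size()
        if current and current_size + size > max_bytes:
            # current block is full: seal it and start a new one
            shards.append(current)
            current, current_size = {}, 0
        current[name] = tensor
        current_size += size
    if current:
        shards.append(current)
    return shards


# e.g. two float32 tensors of 1_000 elements (4_000 bytes each) under a 5_000-byte
# budget land in two separate shards:
assert len(shard_by_size({"a": torch.zeros(1000), "b": torch.zeros(1000)}, 5_000)) == 2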
714
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase__ (unittest.TestCase ): '''simple docstring''' def __init__( self ,_lowerCAmelCase ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=18 ,_lowerCAmelCase=30 ,_lowerCAmelCase=4_00 ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,_lowerCAmelCase=[0.5, 0.5, 0.5] ,): lowerCamelCase__ = size if size is not None else {"""shortest_edge""": 18} lowerCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = num_channels lowerCamelCase__ = image_size lowerCamelCase__ = min_resolution lowerCamelCase__ = max_resolution lowerCamelCase__ = do_resize lowerCamelCase__ = size lowerCamelCase__ = do_center_crop lowerCamelCase__ = crop_size lowerCamelCase__ = do_normalize lowerCamelCase__ = image_mean lowerCamelCase__ = image_std def UpperCamelCase_ ( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase__ (a ,unittest.TestCase ): '''simple docstring''' _UpperCamelCase = LevitImageProcessor if is_vision_available() else None def UpperCamelCase_ ( self ): lowerCamelCase__ = LevitImageProcessingTester(self ) @property def UpperCamelCase_ ( self ): return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_mean""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""image_std""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_normalize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_resize""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""do_center_crop""" ) ) self.assertTrue(hasattr(_lowerCAmelCase ,"""size""" ) ) def UpperCamelCase_ ( self ): lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} ) lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} ) def UpperCamelCase_ ( self ): pass def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,Image.Image ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, 
self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,np.ndarray ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) def UpperCamelCase_ ( self ): # Initialize image_processing lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_lowerCAmelCase ,torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase ,torch.Tensor ) # Test not batched input lowerCamelCase__ = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,) # Test batched lowerCamelCase__ = image_processing(_lowerCAmelCase ,return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) ,)
9
0
import os import sys import unittest SCREAMING_SNAKE_CASE :str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) SCREAMING_SNAKE_CASE :Union[str, Any] = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""") SCREAMING_SNAKE_CASE :Union[str, Any] = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""") class __magic_name__ ( unittest.TestCase ): def UpperCAmelCase_ ( self )-> Any: UpperCamelCase_ = get_test_to_tester_mapping(_UpperCAmelCase ) UpperCamelCase_ = get_test_to_tester_mapping(_UpperCAmelCase ) UpperCamelCase_ = {"BertModelTest": "BertModelTester"} UpperCamelCase_ = { "BlipModelTest": "BlipModelTester", "BlipTextImageModelTest": "BlipTextImageModelsModelTester", "BlipTextModelTest": "BlipTextModelTester", "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester", "BlipVQAModelTest": "BlipVQAModelTester", "BlipVisionModelTest": "BlipVisionModelTester", } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def UpperCAmelCase_ ( self )-> Dict: UpperCamelCase_ = get_model_to_test_mapping(_UpperCAmelCase ) UpperCamelCase_ = get_model_to_test_mapping(_UpperCAmelCase ) UpperCamelCase_ = { "BertForMaskedLM": ["BertModelTest"], "BertForMultipleChoice": ["BertModelTest"], "BertForNextSentencePrediction": ["BertModelTest"], "BertForPreTraining": ["BertModelTest"], "BertForQuestionAnswering": ["BertModelTest"], "BertForSequenceClassification": ["BertModelTest"], "BertForTokenClassification": ["BertModelTest"], "BertLMHeadModel": ["BertModelTest"], "BertModel": ["BertModelTest"], } UpperCamelCase_ = { "BlipForConditionalGeneration": ["BlipTextImageModelTest"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"], "BlipForQuestionAnswering": ["BlipVQAModelTest"], "BlipModel": ["BlipModelTest"], "BlipTextModel": ["BlipTextModelTest"], "BlipVisionModel": ["BlipVisionModelTest"], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def UpperCAmelCase_ ( self )-> Union[str, Any]: UpperCamelCase_ = get_model_to_tester_mapping(_UpperCAmelCase ) UpperCamelCase_ = get_model_to_tester_mapping(_UpperCAmelCase ) UpperCamelCase_ = { "BertForMaskedLM": ["BertModelTester"], "BertForMultipleChoice": ["BertModelTester"], "BertForNextSentencePrediction": ["BertModelTester"], "BertForPreTraining": ["BertModelTester"], "BertForQuestionAnswering": ["BertModelTester"], "BertForSequenceClassification": ["BertModelTester"], "BertForTokenClassification": ["BertModelTester"], "BertLMHeadModel": ["BertModelTester"], "BertModel": ["BertModelTester"], } UpperCamelCase_ = { "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"], "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"], "BlipForQuestionAnswering": ["BlipVQAModelTester"], "BlipModel": ["BlipModelTester"], "BlipTextModel": ["BlipTextModelTester"], "BlipVisionModel": ["BlipVisionModelTester"], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
628
"""simple docstring""" import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase__ : '''simple docstring''' def __init__( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Optional[int]=99 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Dict=36 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Optional[Any]=None , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def lowercase__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : int ) -> Dict: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def lowercase__ ( self : Dict ) -> str: '''simple docstring''' UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = 300 return config def lowercase__ ( self : int ) -> List[Any]: '''simple docstring''' ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = self.prepare_config_and_inputs() UpperCAmelCase_ = True UpperCAmelCase_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowercase__ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple ) -> Any: '''simple docstring''' UpperCAmelCase_ = MraModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) UpperCAmelCase_ = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , ) -> int: '''simple docstring''' UpperCAmelCase_ = True UpperCAmelCase_ = MraModel(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , ) UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , ) UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = MraForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = MraForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) 
model.eval() UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict ) -> Any: '''simple docstring''' UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MraForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> int: '''simple docstring''' UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MraForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ) -> str: '''simple docstring''' UpperCAmelCase_ = self.num_choices UpperCAmelCase_ = MraForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowercase__ ( SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = False UpperCamelCase = () def lowercase__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = MraModelTester(self ) UpperCAmelCase_ = ConfigTester(self , 
config_class=_UpperCAmelCase , hidden_size=37 ) def lowercase__ ( self : Optional[int] ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ = type self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowercase__ ( self : List[str] ) -> str: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase ) def lowercase__ ( self : Any ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase ) def lowercase__ ( self : Optional[int] ) -> str: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase ) def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def lowercase__ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = MraModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @unittest.skip(reason="MRA does not output attentions" ) def lowercase__ ( self : Optional[Any] ) -> Any: '''simple docstring''' return @require_torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Any ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase_ = model(_UpperCAmelCase )[0] UpperCAmelCase_ = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , _UpperCAmelCase ) UpperCAmelCase_ = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) ) @slow def lowercase__ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) UpperCAmelCase_ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): UpperCAmelCase_ = model(_UpperCAmelCase )[0] UpperCAmelCase_ = 50265 UpperCAmelCase_ = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , _UpperCAmelCase ) UpperCAmelCase_ = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) ) @slow def lowercase__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) UpperCAmelCase_ = torch.arange(4096 ).unsqueeze(0 ) with 
torch.no_grad(): UpperCAmelCase_ = model(_UpperCAmelCase )[0] UpperCAmelCase_ = 50265 UpperCAmelCase_ = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , _UpperCAmelCase ) UpperCAmelCase_ = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
82
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    """simple docstring"""

    model_type = "maskformer-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
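# A quick sanity check on the derived attributes above (an added illustration; the
# values follow from the defaults: hidden_size = embed_dim * 2 ** (len(depths) - 1)).
config = MaskFormerSwinConfig()
assert config.hidden_size == 96 * 2 ** 3  # 768
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]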
634
import itertools
import random
import unittest

import numpy as np

from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
634
1
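A quick arithmetic check of the bookkeeping in the MaskFormerSwinConfig record above, using its default embed_dim=96 and depths=[2, 2, 6, 2]; this is plain Python, nothing from transformers is needed:

embed_dim = 96
depths = [2, 2, 6, 2]

# channel dimension after the last stage: the embedding dim doubles at each stage transition
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))

stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]

print(hidden_size)  # 768
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']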
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """simple docstring"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """simple docstring"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
376
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
376
1
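A short usage sketch for the Graph class in the record above (after the restoration of its names); it builds the classic four-node example and runs Borůvka's algorithm, which should pick the edges of weight 5, 10, and 4 for a total of 19:

g = Graph(4)
g.add_edge(0, 1, 10)
g.add_edge(0, 2, 6)
g.add_edge(0, 3, 5)
g.add_edge(1, 3, 15)
g.add_edge(2, 3, 4)
g.boruvka()  # prints each added edge, then "The total weight of the minimal spanning tree is: 19"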
'''simple docstring'''
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """simple docstring"""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
160
'''simple docstring'''
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
160
1
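And a two-line sanity check for the Taylor-series sin() in the last record (assuming the restored function name): with the default accuracy of 18 terms the series converges well past the 10 rounded decimal places, so exact reference angles come back clean.

print(sin(30.0))   # 0.5
print(sin(270.0))  # -1.0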