code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase : Union[str, Any] = { 'configuration_xlm_roberta_xl': [ 'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMRobertaXLConfig', 'XLMRobertaXLOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = [ 'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMRobertaXLForCausalLM', 'XLMRobertaXLForMaskedLM', 'XLMRobertaXLForMultipleChoice', 'XLMRobertaXLForQuestionAnswering', 'XLMRobertaXLForSequenceClassification', 'XLMRobertaXLForTokenClassification', 'XLMRobertaXLModel', 'XLMRobertaXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A ) -> list[list[int]]: """simple docstring""" lowercase__ = [] create_all_state(1 , A , A , [] , A ) return result def _SCREAMING_SNAKE_CASE (A , A , A , A , A , ) -> None: """simple docstring""" if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def _SCREAMING_SNAKE_CASE (A ) -> None: """simple docstring""" for i in total_list: print(*A ) if __name__ == "__main__": lowerCamelCase : Tuple = 4 lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = generate_all_combinations(n, k) print_all_state(total_list)
2
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowerCamelCase : int = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Tuple , *UpperCamelCase : Optional[Any] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
2
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
1
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = StableDiffusionPanoramaPipeline lowerCAmelCase__ : Optional[int] = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase__ (self : Dict ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) lowercase__ = DDIMScheduler() torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase__ = CLIPTextModel(UpperCamelCase ) lowercase__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase__ = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : List[str]=0 ): '''simple docstring''' lowercase__ = torch.manual_seed(UpperCamelCase ) lowercase__ = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, # Setting height and width to None to prevent OOMs on CPU. '''height''': None, '''width''': None, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionPanoramaPipeline(**UpperCamelCase ) lowercase__ = sd_pipe.to(UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = self.get_dummy_inputs(UpperCamelCase ) lowercase__ = sd_pipe(**UpperCamelCase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25E-3 ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionPanoramaPipeline(**UpperCamelCase ) lowercase__ = sd_pipe.to(UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = self.get_dummy_inputs(UpperCamelCase ) lowercase__ = '''french fries''' lowercase__ = sd_pipe(**UpperCamelCase , negative_prompt=UpperCamelCase ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = StableDiffusionPanoramaPipeline(**UpperCamelCase ) lowercase__ = sd_pipe.to(UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = self.get_dummy_inputs(UpperCamelCase ) lowercase__ = sd_pipe(**UpperCamelCase , view_batch_size=2 ) lowercase__ = output.images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = EulerAncestralDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' ) lowercase__ = StableDiffusionPanoramaPipeline(**UpperCamelCase ) lowercase__ = sd_pipe.to(UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = self.get_dummy_inputs(UpperCamelCase ) lowercase__ = sd_pipe(**UpperCamelCase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = PNDMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , skip_prk_steps=UpperCamelCase ) lowercase__ = StableDiffusionPanoramaPipeline(**UpperCamelCase ) lowercase__ = sd_pipe.to(UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = self.get_dummy_inputs(UpperCamelCase ) lowercase__ = sd_pipe(**UpperCamelCase ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase__ = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Dict , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = torch.manual_seed(UpperCamelCase ) lowercase__ = { '''prompt''': '''a photo of the dolomites''', '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = '''stabilityai/stable-diffusion-2-base''' lowercase__ = DDIMScheduler.from_pretrained(UpperCamelCase , subfolder='''scheduler''' ) lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**UpperCamelCase ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__ = np.array( [ 0.36_96_83_92, 0.27_02_53_72, 0.32_44_67_66, 0.28_37_93_87, 0.36_36_32_74, 0.30_73_33_47, 0.27_10_00_27, 0.27_05_41_25, 0.25_53_60_96, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained( '''stabilityai/stable-diffusion-2-base''' , safety_checker=UpperCamelCase ) lowercase__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() lowercase__ = pipe(**UpperCamelCase ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) lowercase__ = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = 0 def callback_fn(UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor ) -> None: lowercase__ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowercase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__ = latents[0, -3:, -3:, -1] lowercase__ = np.array( [ 0.18_68_18_69, 0.33_90_78_16, 0.5_36_12_76, 0.14_43_28_65, -0.02_85_66_11, -0.73_94_11_23, 0.23_39_79_87, 0.47_32_26_82, -0.37_82_31_64, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: lowercase__ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) lowercase__ = latents[0, -3:, -3:, -1] lowercase__ = np.array( [ 0.18_53_96_45, 0.33_98_72_48, 0.5_37_85_59, 0.14_43_71_42, -0.02_45_52_61, -0.7_33_83_17, 0.23_99_07_55, 0.47_35_62_72, -0.3_78_65_05, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 lowercase__ = False lowercase__ = '''stabilityai/stable-diffusion-2-base''' lowercase__ = DDIMScheduler.from_pretrained(UpperCamelCase , subfolder='''scheduler''' ) lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing() lowercase__ = self.get_inputs() pipe(**UpperCamelCase , callback=UpperCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowercase__ = '''stabilityai/stable-diffusion-2-base''' lowercase__ = DDIMScheduler.from_pretrained(UpperCamelCase , subfolder='''scheduler''' ) lowercase__ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowercase__ = self.get_inputs() lowercase__ = pipe(**UpperCamelCase ) lowercase__ = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
2
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCamelCase : str = parser.parse_args() if args.model_type == "bert": lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name) lowerCamelCase : Any = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCamelCase : int = model.state_dict() lowerCamelCase : int = {} for w in ["word_embeddings", "position_embeddings"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] lowerCamelCase : Tuple = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] lowerCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] lowerCamelCase : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] lowerCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] lowerCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] lowerCamelCase : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight'] lowerCamelCase : str = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""] lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
2
1
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class __lowerCAmelCase (yaml.SafeLoader ): '''simple docstring''' def UpperCamelCase__ (self : Any , UpperCamelCase : List[str] ): '''simple docstring''' lowercase__ = [self.constructed_objects[key_node] for key_node, _ in node.value] lowercase__ = [tuple(UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else key for key in keys] lowercase__ = Counter(UpperCamelCase ) lowercase__ = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}" ) def UpperCamelCase__ (self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int]=False ): '''simple docstring''' lowercase__ = super().construct_mapping(UpperCamelCase , deep=UpperCamelCase ) self._check_no_duplicates_on_constructed_node(UpperCamelCase ) return mapping def _SCREAMING_SNAKE_CASE (A ) -> Tuple[Optional[str], str]: """simple docstring""" lowercase__ = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: lowercase__ = full_content[1:].index('''---''' ) + 1 lowercase__ = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(A ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def UpperCamelCase__ (cls : List[str] , UpperCamelCase : Path ): '''simple docstring''' with open(UpperCamelCase , encoding='''utf-8''' ) as readme_file: lowercase__ ,lowercase__ = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCamelCase ) else: return cls() def UpperCamelCase__ (self : List[str] , UpperCamelCase : Path ): '''simple docstring''' if path.exists(): with open(UpperCamelCase , encoding='''utf-8''' ) as readme_file: lowercase__ = readme_file.read() else: lowercase__ = None lowercase__ = self._to_readme(UpperCamelCase ) with open(UpperCamelCase , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if readme_content is not None: lowercase__ ,lowercase__ = _split_yaml_from_readme(UpperCamelCase ) lowercase__ = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: lowercase__ = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def UpperCamelCase__ (cls : int , UpperCamelCase : str ): '''simple docstring''' lowercase__ = yaml.load(UpperCamelCase , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields lowercase__ = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCamelCase , allow_unicode=UpperCamelCase , encoding='''utf-8''' , ).decode('''utf-8''' ) lowerCamelCase : List[str] = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser lowerCamelCase : Dict = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') lowerCamelCase : Optional[Any] = ap.parse_args() lowerCamelCase : Optional[int] = Path(args.readme_filepath) lowerCamelCase : List[str] = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
2
'''simple docstring''' from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ): '''simple docstring''' lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
2
1
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def _SCREAMING_SNAKE_CASE (A , A , A , A , A ) -> np.ndarray: """simple docstring""" lowercase__ = cva.getAffineTransform(A , A ) return cva.warpAffine(A , A , (rows, cols) ) if __name__ == "__main__": # read original image lowerCamelCase : Dict = cva.imread( str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg') ) # turn image in gray scale value lowerCamelCase : str = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape lowerCamelCase , lowerCamelCase : Dict = gray_img.shape # set different points to rotate image lowerCamelCase : List[str] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) lowerCamelCase : Optional[Any] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) lowerCamelCase : Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) lowerCamelCase : Any = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list lowerCamelCase : List[Any] = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations lowerCamelCase : str = plt.figure(1) lowerCamelCase : Any = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3'] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray') plt.title(titles[i]) plt.axis('off') plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5) plt.show()
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Tuple = """cvt""" def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) lowercase__ = num_channels lowercase__ = patch_sizes lowercase__ = patch_stride lowercase__ = patch_padding lowercase__ = embed_dim lowercase__ = num_heads lowercase__ = depth lowercase__ = mlp_ratio lowercase__ = attention_drop_rate lowercase__ = drop_rate lowercase__ = drop_path_rate lowercase__ = qkv_bias lowercase__ = cls_token lowercase__ = qkv_projection_method lowercase__ = kernel_qkv lowercase__ = padding_kv lowercase__ = stride_kv lowercase__ = padding_q lowercase__ = stride_q lowercase__ = initializer_range lowercase__ = layer_norm_eps
2
1
'''simple docstring''' import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def _SCREAMING_SNAKE_CASE (A , A , A ) -> List[Any]: """simple docstring""" lowercase__ = OmegaConf.load(A ) lowercase__ = torch.load(A , map_location='''cpu''' )['''model'''] lowercase__ = list(state_dict.keys() ) # extract state_dict for VQVAE lowercase__ = {} lowercase__ = '''first_stage_model.''' for key in keys: if key.startswith(A ): lowercase__ = state_dict[key] # extract state_dict for UNetLDM lowercase__ = {} lowercase__ = '''model.diffusion_model.''' for key in keys: if key.startswith(A ): lowercase__ = state_dict[key] lowercase__ = config.model.params.first_stage_config.params lowercase__ = config.model.params.unet_config.params lowercase__ = VQModel(**A ).eval() vqvae.load_state_dict(A ) lowercase__ = UNetLDMModel(**A ).eval() unet.load_state_dict(A ) lowercase__ = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=A , ) lowercase__ = LDMPipeline(A , A , A ) pipeline.save_pretrained(A ) if __name__ == "__main__": lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) lowerCamelCase : Dict = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
2
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowerCamelCase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) lowerCamelCase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowerCamelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowerCamelCase : str = np.expand_dims(test_image, axis=0) lowerCamelCase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowerCamelCase : Any = 'Normal' if result[0][0] == 1: lowerCamelCase : Any = 'Abnormality detected'
2
1
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Any = logging.get_logger(__name__) lowerCamelCase : Dict = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = """time_series_transformer""" lowerCAmelCase__ : Optional[int] = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__(self : Any , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "student_t" , UpperCamelCase : str = "nll" , UpperCamelCase : int = 1 , UpperCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] , UpperCamelCase : Optional[Union[str, bool]] = "mean" , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : int = 32 , UpperCamelCase : int = 32 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : bool = True , UpperCamelCase : str = "gelu" , UpperCamelCase : int = 64 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : int = 100 , UpperCamelCase : float = 0.02 , UpperCamelCase : Tuple=True , **UpperCamelCase : Optional[Any] , ): '''simple docstring''' lowercase__ = prediction_length lowercase__ = context_length or prediction_length lowercase__ = distribution_output lowercase__ = loss lowercase__ = input_size lowercase__ = num_time_features lowercase__ = lags_sequence lowercase__ = scaling lowercase__ = num_dynamic_real_features lowercase__ = num_static_real_features lowercase__ = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( '''The cardinality should be a list of the same length as `num_static_categorical_features`''' ) lowercase__ = cardinality else: lowercase__ = [0] if embedding_dimension and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( '''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' ) lowercase__ = embedding_dimension else: lowercase__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowercase__ = num_parallel_samples # Transformer architecture configuration lowercase__ = input_size * len(UpperCamelCase ) + self._number_of_features lowercase__ = d_model lowercase__ = encoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = encoder_ffn_dim lowercase__ = decoder_ffn_dim lowercase__ = encoder_layers lowercase__ = decoder_layers lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = activation_function lowercase__ = init_std lowercase__ = use_cache super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
2
'''simple docstring''' class __lowerCAmelCase : # Public class to implement a graph '''simple docstring''' def __init__(self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = row lowercase__ = col lowercase__ = graph def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): # And finally, count all islands. '''simple docstring''' lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += 1 return count
2
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __lowerCAmelCase : '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Tuple=13 , UpperCamelCase : Dict=30 , UpperCamelCase : int=2 , UpperCamelCase : Any=3 , UpperCamelCase : List[Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : str=4 , UpperCamelCase : Any=37 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : str=10 , UpperCamelCase : Any=0.02 , UpperCamelCase : str=3 , UpperCamelCase : Optional[int]=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = num_patches + 1 def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase__ (self : Tuple ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' lowercase__ = TFViTModel(config=UpperCamelCase ) lowercase__ = model(UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. lowercase__ = self.image_size // 2 lowercase__ = pixel_values[:, :, :image_size, :image_size] lowercase__ = model(UpperCamelCase , interpolate_pos_encoding=UpperCamelCase , training=UpperCamelCase ) lowercase__ = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def UpperCamelCase__ (self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' lowercase__ = self.type_sequence_label_size lowercase__ = TFViTForImageClassification(UpperCamelCase ) lowercase__ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. lowercase__ = self.image_size // 2 lowercase__ = pixel_values[:, :, :image_size, :image_size] lowercase__ = model(UpperCamelCase , interpolate_pos_encoding=UpperCamelCase , training=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase__ = 1 lowercase__ = TFViTForImageClassification(UpperCamelCase ) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase__ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowerCAmelCase__ : Tuple = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) lowerCAmelCase__ : Optional[int] = False lowerCAmelCase__ : Any = False lowerCAmelCase__ : List[str] = False def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = TFViTModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase__ (self : str ): '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' pass def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , tf.keras.layers.Layer ) ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(UpperCamelCase ) lowercase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(UpperCamelCase ) def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase__ (self : str ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=UpperCamelCase , return_tensors='''tf''' ) # forward pass lowercase__ = model(**UpperCamelCase ) # verify the logits lowercase__ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) lowercase__ = tf.constant([-0.27_44, 0.82_15, -0.08_36] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
2
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' from __future__ import annotations class __lowerCAmelCase : '''simple docstring''' def __init__(self : Any , UpperCamelCase : int = 0 ): '''simple docstring''' lowercase__ = key def UpperCamelCase__ (self : str , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) lowercase__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(UpperCamelCase ) ^ key ) for ch in content] def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) lowercase__ = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(UpperCamelCase ) ^ key ) for ch in content] def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int = 0 ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) lowercase__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned lowercase__ = '''''' for ch in content: ans += chr(ord(UpperCamelCase ) ^ key ) return ans def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : int = 0 ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) lowercase__ = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned lowercase__ = '''''' for ch in content: ans += chr(ord(UpperCamelCase ) ^ key ) return ans def UpperCamelCase__ (self : str , UpperCamelCase : str , UpperCamelCase : int = 0 ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) try: with open(UpperCamelCase ) as fin, open('''encrypt.out''' , '''w+''' ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(UpperCamelCase , UpperCamelCase ) ) except OSError: return False return True def UpperCamelCase__ (self : Dict , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) try: with open(UpperCamelCase ) as fin, open('''decrypt.out''' , '''w+''' ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(UpperCamelCase , UpperCamelCase ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" return len(set(A ) ) == len(A ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
'''simple docstring''' from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
2
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase : Any = None lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : List[str] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase : Any = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""] lowerCAmelCase__ : Optional[int] = TaTokenizer lowerCAmelCase__ : List[int] = [] def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True lowercase__ = extra_ids @staticmethod def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , ) return max_model_length def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowercase__ = os.path.join( UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) logger.info(f"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return list( set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCamelCase : Union[str, Any] = { 'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'], 'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'], 'processing_wav2vec2': ['Wav2Vec2Processor'], 'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[Any] = [ 'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ 'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWav2Vec2ForCTC', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel', 'TFWav2Vec2ForSequenceClassification', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys lowerCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ShapEImgaImgPipeline lowerCAmelCase__ : List[str] = ["""image"""] lowerCAmelCase__ : Any = ["""image"""] lowerCAmelCase__ : Any = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowerCAmelCase__ : Tuple = False @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : str ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return 8 @property def UpperCamelCase__ (self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ = CLIPVisionModel(UpperCamelCase ) return model @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def UpperCamelCase__ (self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowercase__ = PriorTransformer(**UpperCamelCase ) return model @property def UpperCamelCase__ (self : int ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**UpperCamelCase ) return model def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.dummy_prior lowercase__ = self.dummy_image_encoder lowercase__ = self.dummy_image_processor lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , ) lowercase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = torch_device == '''cpu''' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) lowercase__ = pipe( UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) lowerCamelCase : Optional[Any] = logging.getLogger(__name__) @dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : str lowerCAmelCase__ : str lowerCAmelCase__ : Optional[str] = None lowerCAmelCase__ : Optional[str] = None lowerCAmelCase__ : Optional[str] = None @dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : List[int] lowerCAmelCase__ : Optional[List[int]] = None lowerCAmelCase__ : Optional[List[int]] = None lowerCAmelCase__ : Optional[Union[int, float]] = None lowerCAmelCase__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[InputFeatures] def __init__(self : Optional[int] , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = None , UpperCamelCase : List[Any]=False , UpperCamelCase : bool = False , ): '''simple docstring''' lowercase__ = hans_processors[task]() lowercase__ = os.path.join( UpperCamelCase , '''cached_{}_{}_{}_{}'''.format( '''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(UpperCamelCase ) , UpperCamelCase , ) , ) lowercase__ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowercase__ ,lowercase__ = label_list[2], label_list[1] lowercase__ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowercase__ = cached_features_file + '''.lock''' with FileLock(UpperCamelCase ): if os.path.exists(UpperCamelCase ) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}" ) lowercase__ = torch.load(UpperCamelCase ) else: logger.info(f"Creating features from dataset file at {data_dir}" ) lowercase__ = ( processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase ) ) logger.info('''Training examples: %s''' , len(UpperCamelCase ) ) lowercase__ = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) logger.info('''Saving features into cached file %s''' , UpperCamelCase ) torch.save(self.features , UpperCamelCase ) def __len__(self : Optional[int] ): '''simple docstring''' return len(self.features ) def __getitem__(self : Tuple , UpperCamelCase : str ): '''simple docstring''' return self.features[i] def UpperCamelCase__ (self : str ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : List[InputFeatures] def __init__(self : List[Any] , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = 128 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : bool = False , ): '''simple docstring''' lowercase__ = hans_processors[task]() lowercase__ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) lowercase__ ,lowercase__ = label_list[2], label_list[1] lowercase__ = label_list lowercase__ = processor.get_dev_examples(UpperCamelCase ) if evaluate else processor.get_train_examples(UpperCamelCase ) lowercase__ = hans_convert_examples_to_features(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ): if ex_index % 10000 == 0: logger.info('''Writing example %d of %d''' % (ex_index, len(UpperCamelCase )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) lowercase__ = tf.data.Dataset.from_generator( UpperCamelCase , ( { '''example_id''': tf.intaa, '''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa, }, tf.intaa, ) , ( { '''example_id''': tf.TensorShape([] ), '''input_ids''': tf.TensorShape([None, None] ), '''attention_mask''': tf.TensorShape([None, None] ), '''token_type_ids''': tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return self.dataset def __len__(self : List[Any] ): '''simple docstring''' return len(self.features ) def __getitem__(self : Union[str, Any] , UpperCamelCase : Tuple ): '''simple docstring''' return self.features[i] def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.label_list class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Any ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , '''heuristics_train_set.txt''' ) ) , '''train''' ) def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[Any] ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase , '''heuristics_evaluation_set.txt''' ) ) , '''dev''' ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = [] for i, line in enumerate(UpperCamelCase ): if i == 0: continue lowercase__ = '''%s-%s''' % (set_type, line[0]) lowercase__ = line[5] lowercase__ = line[6] lowercase__ = line[7][2:] if line[7].startswith('''ex''' ) else line[7] lowercase__ = line[0] examples.append(InputExample(guid=UpperCamelCase , text_a=UpperCamelCase , text_b=UpperCamelCase , label=UpperCamelCase , pairID=UpperCamelCase ) ) return examples def _SCREAMING_SNAKE_CASE (A , A , A , A , ) -> int: """simple docstring""" lowercase__ = {label: i for i, label in enumerate(A )} lowercase__ = [] for ex_index, example in tqdm.tqdm(enumerate(A ) , desc='''convert examples to features''' ): if ex_index % 10_000 == 0: logger.info('''Writing example %d''' % (ex_index) ) lowercase__ = tokenizer( example.text_a , example.text_b , add_special_tokens=A , max_length=A , padding='''max_length''' , truncation=A , return_overflowing_tokens=A , ) lowercase__ = label_map[example.label] if example.label in label_map else 0 lowercase__ = int(example.pairID ) features.append(InputFeatures(**A , label=A , pairID=A ) ) for i, example in enumerate(examples[:5] ): logger.info('''*** Example ***''' ) logger.info(f"guid: {example}" ) logger.info(f"features: {features[i]}" ) return features lowerCamelCase : Tuple = { 'hans': 3, } lowerCamelCase : str = { 'hans': HansProcessor, }
2
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : str = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
1
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCamelCase : str = parser.parse_args() if args.model_type == "bert": lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name) lowerCamelCase : Any = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCamelCase : int = model.state_dict() lowerCamelCase : int = {} for w in ["word_embeddings", "position_embeddings"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] lowerCamelCase : Tuple = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] lowerCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] lowerCamelCase : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] lowerCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] lowerCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] lowerCamelCase : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight'] lowerCamelCase : str = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""] lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : List[Any] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = """realm""" def __init__(self : str , UpperCamelCase : List[Any]=30522 , UpperCamelCase : List[Any]=768 , UpperCamelCase : int=128 , UpperCamelCase : Any=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Union[str, Any]=3072 , UpperCamelCase : List[str]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=512 , UpperCamelCase : Dict=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[Any]=1E-12 , UpperCamelCase : Dict=256 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : Optional[int]=1E-3 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[int]=320 , UpperCamelCase : List[str]=13353718 , UpperCamelCase : Optional[Any]=5000 , UpperCamelCase : str=1 , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : List[Any]=2 , **UpperCamelCase : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) # Common config lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = retriever_proj_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = num_candidates lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = type_vocab_size lowercase__ = layer_norm_eps # Reader config lowercase__ = span_hidden_size lowercase__ = max_span_width lowercase__ = reader_layer_norm_eps lowercase__ = reader_beam_size lowercase__ = reader_seq_len # Retrieval config lowercase__ = num_block_records lowercase__ = searcher_beam_size
2
1
'''simple docstring''' import copy import os import cva import numpy as np from matplotlib import pyplot as plt class __lowerCAmelCase : '''simple docstring''' def __init__(self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''''' lowercase__ = '''''' lowercase__ = [] lowercase__ = 0 lowercase__ = 256 lowercase__ = 0 lowercase__ = 0 lowercase__ = 0 lowercase__ = 0 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : int ): '''simple docstring''' lowercase__ = cva.imread(UpperCamelCase , 0 ) lowercase__ = copy.deepcopy(self.img ) lowercase__ ,lowercase__ ,lowercase__ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' ) lowercase__ = np.sum(UpperCamelCase ) for i in range(len(UpperCamelCase ) ): lowercase__ = x[i] / self.k self.sk += prk lowercase__ = (self.L - 1) * self.sk if self.rem != 0: lowercase__ = int(last % last ) lowercase__ = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(UpperCamelCase ) lowercase__ = int(np.ma.count(self.img ) / self.img[1].size ) lowercase__ = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): lowercase__ = self.img[j][i] if num != self.last_list[num]: lowercase__ = self.last_list[num] cva.imwrite('''output_data/output.jpg''' , self.img ) def UpperCamelCase__ (self : int ): '''simple docstring''' plt.hist(self.img.ravel() , 256 , [0, 256] ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' cva.imshow('''Output-Image''' , self.img ) cva.imshow('''Input-Image''' , self.original_image ) cva.waitKey(5000 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCamelCase : Union[str, Any] = os.path.join(os.path.basename(__file__), 'image_data/input.jpg') lowerCamelCase : int = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
2
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : int = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = """mvp""" lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""] lowerCAmelCase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self : Any , UpperCamelCase : Optional[int]=50267 , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=12 , UpperCamelCase : Tuple=4096 , UpperCamelCase : Dict=16 , UpperCamelCase : int=12 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Union[str, Any]=1024 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=1 , UpperCamelCase : int=0 , UpperCamelCase : int=2 , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=False , UpperCamelCase : int=100 , UpperCamelCase : Optional[Any]=800 , **UpperCamelCase : str , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = classifier_dropout lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = use_prompt lowercase__ = prompt_length lowercase__ = prompt_mid_dim super().__init__( pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ): lowercase__ = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " '''The config can simply be saved and uploaded again to be fixed.''' )
2
1
'''simple docstring''' import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename lowerCamelCase : Tuple = 'http://www.mocksite.com/file1.txt' lowerCamelCase : Union[str, Any] = '"text": ["foo", "foo"]' lowerCamelCase : Dict = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8' class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = 200 lowerCAmelCase__ : str = {"""Content-Length""": """100"""} lowerCAmelCase__ : Optional[Any] = {} def UpperCamelCase__ (self : Tuple , **UpperCamelCase : int ): '''simple docstring''' return [bytes(UpperCamelCase , '''utf-8''' )] def _SCREAMING_SNAKE_CASE (*A , **A ) -> str: """simple docstring""" return MockResponse() @pytest.mark.parametrize('''urls_type''' , [str, list, dict] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> List[str]: """simple docstring""" import requests monkeypatch.setattr(A , '''request''' , A ) lowercase__ = URL if issubclass(A , A ): lowercase__ = url elif issubclass(A , A ): lowercase__ = [url] elif issubclass(A , A ): lowercase__ = {'''train''': url} lowercase__ = '''dummy''' lowercase__ = '''downloads''' lowercase__ = tmp_path lowercase__ = DownloadConfig( cache_dir=os.path.join(A , A ) , use_etag=A , ) lowercase__ = DownloadManager(dataset_name=A , download_config=A ) lowercase__ = dl_manager.download(A ) lowercase__ = urls for downloaded_paths in [downloaded_paths]: if isinstance(A , A ): lowercase__ = [downloaded_paths] lowercase__ = [urls] elif isinstance(A , A ): assert "train" in downloaded_paths.keys() lowercase__ = downloaded_paths.values() lowercase__ = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(A , A ): assert downloaded_path == dl_manager.downloaded_paths[input_url] lowercase__ = Path(A ) lowercase__ = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() lowercase__ = downloaded_path.read_text() assert content == CONTENT lowercase__ = downloaded_path.with_suffix('''.json''' ) assert metadata_downloaded_path.exists() lowercase__ = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('''paths_type''' , [str, list, dict] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> List[str]: """simple docstring""" lowercase__ = str(A ) if issubclass(A , A ): lowercase__ = filename elif issubclass(A , A ): lowercase__ = [filename] elif issubclass(A , A ): lowercase__ = {'''train''': filename} lowercase__ = '''dummy''' lowercase__ = xz_file.parent lowercase__ = '''extracted''' lowercase__ = DownloadConfig( cache_dir=A , use_etag=A , ) lowercase__ = DownloadManager(dataset_name=A , download_config=A ) lowercase__ = dl_manager.extract(A ) lowercase__ = paths for extracted_paths in [extracted_paths]: if isinstance(A , A ): lowercase__ = [extracted_paths] lowercase__ = [paths] elif isinstance(A , A ): assert "train" in extracted_paths.keys() lowercase__ = extracted_paths.values() lowercase__ = paths.values() assert extracted_paths for extracted_path, input_path in zip(A , A ): assert extracted_path == dl_manager.extracted_paths[input_path] lowercase__ = Path(A ) lowercase__ = extracted_path.parts assert parts[-1] == hash_url_to_filename(A , etag=A ) assert parts[-2] == extracted_subdir assert extracted_path.exists() lowercase__ = extracted_path.read_text() lowercase__ = text_file.read_text() assert extracted_file_content == expected_file_content def _SCREAMING_SNAKE_CASE (A , A ) -> Any: """simple docstring""" assert path.endswith('''.jsonl''' ) for num_items, line in enumerate(A , start=1 ): lowercase__ = json.loads(line.decode('''utf-8''' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('''archive_jsonl''' , ['''tar_jsonl_path''', '''zip_jsonl_path'''] ) def _SCREAMING_SNAKE_CASE (A , A ) -> Any: """simple docstring""" lowercase__ = request.getfixturevalue(A ) lowercase__ = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A ) , start=1 ): _test_jsonl(A , A ) assert num_jsonl == 2 @pytest.mark.parametrize('''archive_nested_jsonl''' , ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] ) def _SCREAMING_SNAKE_CASE (A , A ) -> int: """simple docstring""" lowercase__ = request.getfixturevalue(A ) lowercase__ = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A ) , start=1 ): _test_jsonl(A , A ) assert num_tar == 1 assert num_jsonl == 2 def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" lowercase__ = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(A ) , start=1 ): assert os.path.basename(A ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
2
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = DebertaVaTokenizer lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast lowerCAmelCase__ : str = True lowerCAmelCase__ : Tuple = True def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = '''this is a test''' lowercase__ = '''this is a test''' return input_text, output_text def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''<pad>''' lowercase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(UpperCamelCase ) , 30001 ) def UpperCamelCase__ (self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''This is a test''' lowercase__ = [13, 1, 4398, 25, 21, 1289] lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # fmt: off lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DebertaVaTokenizer(UpperCamelCase ) lowercase__ = tokenizer.encode('''sequence builders''' ) lowercase__ = tokenizer.encode('''multi-sequence build''' ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
'''simple docstring''' import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = DDIMPipeline lowerCAmelCase__ : Tuple = UNCONDITIONAL_IMAGE_GENERATION_PARAMS lowerCAmelCase__ : int = PipelineTesterMixin.required_optional_params - { """num_images_per_prompt""", """latents""", """callback""", """callback_steps""", } lowerCAmelCase__ : Any = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS lowerCAmelCase__ : int = False def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) lowercase__ = DDIMScheduler() lowercase__ = {'''unet''': unet, '''scheduler''': scheduler} return components def UpperCamelCase__ (self : Dict , UpperCamelCase : Any , UpperCamelCase : str=0 ): '''simple docstring''' if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''batch_size''': 1, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = self.get_dummy_inputs(UpperCamelCase ) lowercase__ = pipe(**UpperCamelCase ).images lowercase__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) lowercase__ = np.array( [1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04] ) lowercase__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(UpperCamelCase , 1E-3 ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def UpperCamelCase__ (self : int ): '''simple docstring''' super().test_save_load_local(expected_max_difference=3E-3 ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=3E-3 ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = '''google/ddpm-cifar10-32''' lowercase__ = UNetaDModel.from_pretrained(UpperCamelCase ) lowercase__ = DDIMScheduler() lowercase__ = DDIMPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase ) ddim.to(UpperCamelCase ) ddim.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.manual_seed(0 ) lowercase__ = ddim(generator=UpperCamelCase , eta=0.0 , output_type='''numpy''' ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase__ = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''google/ddpm-ema-bedroom-256''' lowercase__ = UNetaDModel.from_pretrained(UpperCamelCase ) lowercase__ = DDIMScheduler.from_pretrained(UpperCamelCase ) lowercase__ = DDIMPipeline(unet=UpperCamelCase , scheduler=UpperCamelCase ) ddpm.to(UpperCamelCase ) ddpm.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.manual_seed(0 ) lowercase__ = ddpm(generator=UpperCamelCase , output_type='''numpy''' ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowercase__ = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
2
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]: """simple docstring""" lowercase__ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE (A ) -> List[str]: """simple docstring""" lowercase__ ,lowercase__ = emb.weight.shape lowercase__ = nn.Linear(A , A , bias=A ) lowercase__ = emb.weight.data return lin_layer def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]: """simple docstring""" lowercase__ = torch.load(A , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A ) lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A ) if mbart_aa and finetuned: lowercase__ = '''relu''' lowercase__ = state_dict['''decoder.embed_tokens.weight'''] lowercase__ = MBartForConditionalGeneration(A ) model.model.load_state_dict(A ) if finetuned: lowercase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') lowerCamelCase : Any = parser.parse_args() lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
2
1
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCamelCase : List[Any] = 16 lowerCamelCase : Tuple = 32 def _SCREAMING_SNAKE_CASE (A , A = 16 , A = "bert-base-cased" ) -> Union[str, Any]: """simple docstring""" lowercase__ = AutoTokenizer.from_pretrained(A ) lowercase__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowercase__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A , max_length=A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ = datasets.map( A , batched=A , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=A ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(A , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(A , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. lowercase__ = DataLoader( tokenized_datasets['''train'''] , shuffle=A , collate_fn=A , batch_size=A ) lowercase__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=A , collate_fn=A , batch_size=A ) return train_dataloader, eval_dataloader def _SCREAMING_SNAKE_CASE (A , A , A , A ) -> List[str]: """simple docstring""" model.eval() lowercase__ = 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ = model(**A ) lowercase__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowercase__ ,lowercase__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(A ) - 1: lowercase__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowercase__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=A , references=A , ) lowercase__ = metric.compute() return eval_metric["accuracy"] def _SCREAMING_SNAKE_CASE (A , A ) -> int: """simple docstring""" lowercase__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ = config['''lr'''] lowercase__ = int(config['''num_epochs'''] ) lowercase__ = int(config['''seed'''] ) lowercase__ = int(config['''batch_size'''] ) lowercase__ = args.model_name_or_path set_seed(A ) lowercase__ ,lowercase__ = get_dataloaders(A , A , A ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ = AutoModelForSequenceClassification.from_pretrained(A , return_dict=A ) # Instantiate optimizer lowercase__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ = optimizer_cls(params=model.parameters() , lr=A ) if accelerator.state.deepspeed_plugin is not None: lowercase__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: lowercase__ = 1 lowercase__ = (len(A ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ = get_linear_schedule_with_warmup( optimizer=A , num_warmup_steps=0 , num_training_steps=A , ) else: lowercase__ = DummyScheduler(A , total_num_steps=A , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = accelerator.prepare( A , A , A , A , A ) # We need to keep track of how many total steps we have iterated over lowercase__ = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ = 0 lowercase__ = evaluate.load('''glue''' , '''mrpc''' ) lowercase__ = num_epochs if args.partial_train_epoch is not None: lowercase__ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) lowercase__ = args.resume_from_checkpoint.split('''epoch_''' )[1] lowercase__ = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break lowercase__ = int(A ) + 1 lowercase__ = evaluation_loop(A , A , A , A ) accelerator.print('''resumed checkpoint performance:''' , A ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , f"state_{starting_epoch-1}.json" ) , '''r''' ) as f: lowercase__ = json.load(A ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model lowercase__ = {} for epoch in range(A , A ): model.train() for step, batch in enumerate(A ): lowercase__ = model(**A ) lowercase__ = outputs.loss lowercase__ = loss / gradient_accumulation_steps accelerator.backward(A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 lowercase__ = f"epoch_{epoch}" lowercase__ = os.path.join(args.output_dir , A ) accelerator.save_state(A ) lowercase__ = evaluation_loop(A , A , A , A ) lowercase__ = accuracy lowercase__ = lr_scheduler.get_lr()[0] lowercase__ = optimizer.param_groups[0]['''lr'''] lowercase__ = epoch lowercase__ = overall_step accelerator.print(f"epoch {epoch}:" , A ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f"state_{epoch}.json" ) , '''w''' ) as f: json.dump(A , A ) def _SCREAMING_SNAKE_CASE () -> List[str]: """simple docstring""" lowercase__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=A , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=A , ) parser.add_argument( '''--output_dir''' , type=A , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=A , default=A , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=A , default=A , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=A , default=2 , help='''Number of train epochs.''' , ) lowercase__ = parser.parse_args() lowercase__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(A , A ) if __name__ == "__main__": main()
2
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCamelCase : List[Any] = logging.getLogger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ): '''simple docstring''' lowercase__ = label_idx def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: lowercase__ = [] lowercase__ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 lowercase__ = [] lowercase__ = [] else: lowercase__ = line.split(''' ''' ) words.append(splits[0] ) if len(UpperCamelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) return examples def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(UpperCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(UpperCamelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] ): '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(UpperCamelCase ): lowercase__ = [] lowercase__ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(UpperCamelCase ) == len(UpperCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 return examples def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for sentence in parse_incr(UpperCamelCase ): lowercase__ = preds_list[example_id] lowercase__ = '''''' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(UpperCamelCase ) example_id += 1 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
2
1
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = """megatron-bert""" def __init__(self : Tuple , UpperCamelCase : Optional[int]=29056 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=24 , UpperCamelCase : int=16 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Optional[int]="absolute" , UpperCamelCase : List[Any]=True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache
2
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = tempfile.mkdtemp() lowercase__ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowercase__ = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } lowercase__ = os.path.join(self.tmpdirname , UpperCamelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Tuple , **UpperCamelCase : Optional[int] ): '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : Tuple , **UpperCamelCase : List[str] ): '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : List[Any] , **UpperCamelCase : Optional[int] ): '''simple docstring''' return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : str ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = self.get_image_processor() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase ) lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase__ = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowercase__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 ) lowercase__ = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = self.prepare_image_inputs() lowercase__ = image_processor(UpperCamelCase , return_tensors='''np''' ) lowercase__ = processor(images=UpperCamelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = '''lower newer''' lowercase__ = processor(text=UpperCamelCase ) lowercase__ = tokenizer(UpperCamelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = '''lower newer''' lowercase__ = self.prepare_image_inputs() lowercase__ = processor(text=UpperCamelCase , images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase ): processor() def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ = processor.batch_decode(UpperCamelCase ) lowercase__ = tokenizer.batch_decode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = AlignProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase ) lowercase__ = '''lower newer''' lowercase__ = self.prepare_image_inputs() lowercase__ = processor(text=UpperCamelCase , images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
2
'''simple docstring''' # Lint as: python3 import itertools import os import re lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])') lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])') lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)') lowerCamelCase : List[Any] = re.compile(R'(_{2,})') lowerCamelCase : str = R'^\w+(\.\w+)*$' lowerCamelCase : Dict = R'<>:/\|?*' def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A ) lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A ) return name.lower() def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" lowercase__ = _single_underscore_re.split(A ) lowercase__ = [_multiple_underscores_re.split(A ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' ) def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , A ): raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." ) return f"{filename_prefix_for_name(A )}-{split}" def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) if filetype_suffix: prefix += f".{filetype_suffix}" lowercase__ = os.path.join(A , A ) return f"{filepath}*" def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) lowercase__ = os.path.join(A , A ) if shard_lengths: lowercase__ = len(A ) lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )] if filetype_suffix: lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames] return filenames else: lowercase__ = prefix if filetype_suffix: filename += f".{filetype_suffix}" return [filename]
2
1
'''simple docstring''' from __future__ import annotations import csv import requests from bsa import BeautifulSoup def _SCREAMING_SNAKE_CASE (A = "" ) -> dict[str, float]: """simple docstring""" lowercase__ = url or '''https://www.imdb.com/chart/top/?ref_=nv_mv_250''' lowercase__ = BeautifulSoup(requests.get(A ).text , '''html.parser''' ) lowercase__ = soup.find_all('''td''' , attrs='''titleColumn''' ) lowercase__ = soup.find_all('''td''' , class_='''ratingColumn imdbRating''' ) return { title.a.text: float(rating.strong.text ) for title, rating in zip(A , A ) } def _SCREAMING_SNAKE_CASE (A = "IMDb_Top_250_Movies.csv" ) -> None: """simple docstring""" lowercase__ = get_imdb_top_aaa_movies() with open(A , '''w''' , newline='''''' ) as out_file: lowercase__ = csv.writer(A ) writer.writerow(['''Movie title''', '''IMDb rating'''] ) for title, rating in movies.items(): writer.writerow([title, rating] ) if __name__ == "__main__": write_movies()
2
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __lowerCAmelCase : '''simple docstring''' def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = decoder_seq_length # For common tests lowercase__ = self.decoder_seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_model lowercase__ = decoder_layers lowercase__ = decoder_layers lowercase__ = decoder_ffn_dim lowercase__ = decoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = eos_token_id lowercase__ = bos_token_id lowercase__ = pad_token_id lowercase__ = decoder_start_token_id lowercase__ = use_cache lowercase__ = max_position_embeddings lowercase__ = None lowercase__ = decoder_seq_length lowercase__ = 2 lowercase__ = 1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ): '''simple docstring''' lowercase__ = True lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval() lowercase__ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 ) lowercase__ = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ = model(UpperCamelCase )['''last_hidden_state'''] lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state'''] # select random slice lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowercase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else () lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} lowerCAmelCase__ : Optional[Any] = True lowerCAmelCase__ : List[str] = False def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass
2
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Any = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = """lxmert""" lowerCAmelCase__ : Tuple = {} def __init__(self : Union[str, Any] , UpperCamelCase : int=30522 , UpperCamelCase : Optional[Any]=768 , UpperCamelCase : Optional[int]=12 , UpperCamelCase : List[Any]=9500 , UpperCamelCase : Tuple=1600 , UpperCamelCase : List[Any]=400 , UpperCamelCase : Dict=3072 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : str=512 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : int=0.02 , UpperCamelCase : str=1E-12 , UpperCamelCase : List[str]=9 , UpperCamelCase : Dict=5 , UpperCamelCase : Optional[int]=5 , UpperCamelCase : str=2048 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Optional[int]=6.67 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : str=True , UpperCamelCase : List[str]=True , **UpperCamelCase : int , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = num_qa_labels lowercase__ = num_object_labels lowercase__ = num_attr_labels lowercase__ = l_layers lowercase__ = x_layers lowercase__ = r_layers lowercase__ = visual_feat_dim lowercase__ = visual_pos_dim lowercase__ = visual_loss_normalizer lowercase__ = task_matched lowercase__ = task_mask_lm lowercase__ = task_obj_predict lowercase__ = task_qa lowercase__ = visual_obj_loss lowercase__ = visual_attr_loss lowercase__ = visual_feat_loss lowercase__ = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**UpperCamelCase )
2
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" if not isinstance(A , A ): raise TypeError('''only integers accepted as input''' ) else: lowercase__ = str(abs(A ) ) lowercase__ = [list(A ) for char in range(len(A ) )] for index in range(len(A ) ): num_transpositions[index].pop(A ) return max( int(''''''.join(list(A ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCamelCase : List[str] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = ['OwlViTFeatureExtractor'] lowerCamelCase : str = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : int = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCamelCase : str = Mapping[str, np.ndarray] lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict. lowerCamelCase : Any = 0.0_1 @dataclasses.dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent lowerCAmelCase__ : Optional[Sequence[int]] = None def _SCREAMING_SNAKE_CASE (A ) -> Protein: """simple docstring""" lowercase__ = R'''(\[[A-Z]+\]\n)''' lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0] lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowercase__ = ["N", "CA", "C"] lowercase__ = None lowercase__ = None lowercase__ = None for g in groups: if "[PRIMARY]" == g[0]: lowercase__ = g[1][0].strip() for i in range(len(A ) ): if seq[i] not in residue_constants.restypes: lowercase__ = '''X''' # FIXME: strings are immutable lowercase__ = np.array( [residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowercase__ = [] for axis in range(3 ): tertiary.append(list(map(A , g[1][axis].split() ) ) ) lowercase__ = np.array(A ) lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowercase__ = np.zeros( ( len(A ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , ) def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) lowercase__ = prot.parents lowercase__ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowercase__ = [p for i, p in zip(A , A ) if i == chain_id] if parents is None or len(A ) == 0: lowercase__ = ['''N/A'''] pdb_headers.append(f"PARENT {' '.join(A )}" ) return pdb_headers def _SCREAMING_SNAKE_CASE (A , A ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = pdb_str.split('''\n''' ) lowercase__ = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) lowercase__ = 42 if prot.parents is not None and len(prot.parents ) > 0: lowercase__ = [] if prot.parents_chain_index is not None: lowercase__ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(A ) , [] ) parent_dict[str(A )].append(A ) lowercase__ = max([int(A ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] ) parents_per_chain.append(A ) else: parents_per_chain.append(list(prot.parents ) ) else: lowercase__ = [['''N/A''']] def make_parent_line(A ) -> str: return f"PARENT {' '.join(A )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowercase__ = 0 for i, l in enumerate(A ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(A ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(A ): lowercase__ = parents_per_chain[chain_counter] else: lowercase__ = ['''N/A'''] out_pdb_lines.append(make_parent_line(A ) ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = residue_constants.restypes + ['''X'''] def res_atoa(A ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowercase__ = residue_constants.atom_types lowercase__ = [] lowercase__ = prot.atom_mask lowercase__ = prot.aatype lowercase__ = prot.atom_positions lowercase__ = prot.residue_index.astype(np.intaa ) lowercase__ = prot.b_factors lowercase__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowercase__ = get_pdb_headers(A ) if len(A ) > 0: pdb_lines.extend(A ) lowercase__ = aatype.shape[0] lowercase__ = 1 lowercase__ = 0 lowercase__ = string.ascii_uppercase lowercase__ = None # Add all atom sites. for i in range(A ): lowercase__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowercase__ = '''ATOM''' lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}" lowercase__ = '''''' lowercase__ = '''''' lowercase__ = 1.00 lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works. lowercase__ = '''''' lowercase__ = '''A''' if chain_index is not None: lowercase__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowercase__ = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(A ) atom_index += 1 lowercase__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowercase__ = True lowercase__ = chain_index[i + 1] if should_terminate: # Close the chain. lowercase__ = '''TER''' lowercase__ = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(A ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(A , A ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
2
1
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def __init__(self : Dict , UpperCamelCase : str , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : Optional[int]=3 , UpperCamelCase : Optional[int]=18 , UpperCamelCase : List[Any]=30 , UpperCamelCase : Dict=400 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[Any]=None , UpperCamelCase : List[Any]=True , UpperCamelCase : int=None , UpperCamelCase : Optional[int]=True , UpperCamelCase : Union[str, Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , UpperCamelCase : Union[str, Any]=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , UpperCamelCase : Union[str, Any]=True , ): '''simple docstring''' lowercase__ = size if size is not None else {'''height''': 224, '''width''': 224} lowercase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = image_size lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size lowercase__ = do_center_crop lowercase__ = crop_size lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std lowercase__ = do_convert_rgb def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def UpperCamelCase__ (self : List[str] , UpperCamelCase : Dict=False , UpperCamelCase : Optional[int]=False , UpperCamelCase : Tuple=False ): '''simple docstring''' assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: lowercase__ = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: lowercase__ = [] for i in range(self.batch_size ): lowercase__ ,lowercase__ = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension lowercase__ = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs] if torchify: lowercase__ = [torch.from_numpy(UpperCamelCase ) for x in image_inputs] return image_inputs @require_torch @require_vision class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : str = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = ChineseCLIPImageProcessingTester(self , do_center_crop=UpperCamelCase ) @property def UpperCamelCase__ (self : List[str] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''do_convert_rgb''' ) ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=UpperCamelCase ) lowercase__ = 3 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''center_crop''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase , '''do_convert_rgb''' ) ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase__ = image_processing(UpperCamelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A ) -> list[list[int]]: """simple docstring""" lowercase__ = [] create_all_state(1 , A , A , [] , A ) return result def _SCREAMING_SNAKE_CASE (A , A , A , A , A , ) -> None: """simple docstring""" if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def _SCREAMING_SNAKE_CASE (A ) -> None: """simple docstring""" for i in total_list: print(*A ) if __name__ == "__main__": lowerCamelCase : Tuple = 4 lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = generate_all_combinations(n, k) print_all_state(total_list)
2
1
'''simple docstring''' import torch from torch import nn class __lowerCAmelCase (nn.Module ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : int=1 , UpperCamelCase : List[str]=False ): '''simple docstring''' super().__init__() lowercase__ = n_token lowercase__ = d_embed lowercase__ = d_proj lowercase__ = cutoffs + [n_token] lowercase__ = [0] + self.cutoffs lowercase__ = div_val lowercase__ = self.cutoffs[0] lowercase__ = len(self.cutoffs ) - 1 lowercase__ = self.shortlist_size + self.n_clusters if self.n_clusters > 0: lowercase__ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) lowercase__ = nn.Parameter(torch.zeros(self.n_clusters ) ) lowercase__ = nn.ModuleList() lowercase__ = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase , UpperCamelCase ) ) ) else: self.out_projs.append(UpperCamelCase ) self.out_layers.append(nn.Linear(UpperCamelCase , UpperCamelCase ) ) else: for i in range(len(self.cutoffs ) ): lowercase__ ,lowercase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase , UpperCamelCase ) ) ) self.out_layers.append(nn.Linear(UpperCamelCase , r_idx - l_idx ) ) lowercase__ = keep_order def UpperCamelCase__ (self : str , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' if proj is None: lowercase__ = nn.functional.linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: lowercase__ = nn.functional.linear(UpperCamelCase , proj.t().contiguous() ) lowercase__ = nn.functional.linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str]=None , UpperCamelCase : Tuple=False ): '''simple docstring''' if labels is not None: # Shift so that tokens < n predict n lowercase__ = hidden[..., :-1, :].contiguous() lowercase__ = labels[..., 1:].contiguous() lowercase__ = hidden.view(-1 , hidden.size(-1 ) ) lowercase__ = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' ) else: lowercase__ = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: lowercase__ = self._compute_logit(UpperCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: lowercase__ = labels != -100 lowercase__ = torch.zeros_like(UpperCamelCase , dtype=hidden.dtype , device=hidden.device ) lowercase__ = ( -nn.functional.log_softmax(UpperCamelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: lowercase__ = nn.functional.log_softmax(UpperCamelCase , dim=-1 ) else: # construct weights and biases lowercase__ ,lowercase__ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase__ ,lowercase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ = self.out_layers[0].weight[l_idx:r_idx] lowercase__ = self.out_layers[0].bias[l_idx:r_idx] else: lowercase__ = self.out_layers[i].weight lowercase__ = self.out_layers[i].bias if i == 0: lowercase__ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowercase__ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(UpperCamelCase ) biases.append(UpperCamelCase ) lowercase__ ,lowercase__ ,lowercase__ = weights[0], biases[0], self.out_projs[0] lowercase__ = self._compute_logit(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowercase__ = nn.functional.log_softmax(UpperCamelCase , dim=1 ) if labels is None: lowercase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: lowercase__ = torch.zeros_like(UpperCamelCase , dtype=hidden.dtype , device=hidden.device ) lowercase__ = 0 lowercase__ = [0] + self.cutoffs for i in range(len(UpperCamelCase ) - 1 ): lowercase__ ,lowercase__ = cutoff_values[i], cutoff_values[i + 1] if labels is not None: lowercase__ = (labels >= l_idx) & (labels < r_idx) lowercase__ = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue lowercase__ = labels.index_select(0 , UpperCamelCase ) - l_idx lowercase__ = head_logprob.index_select(0 , UpperCamelCase ) lowercase__ = hidden.index_select(0 , UpperCamelCase ) else: lowercase__ = hidden if i == 0: if labels is not None: lowercase__ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: lowercase__ = head_logprob[:, : self.cutoffs[0]] else: lowercase__ ,lowercase__ ,lowercase__ = weights[i], biases[i], self.out_projs[i] lowercase__ = self._compute_logit(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowercase__ = nn.functional.log_softmax(UpperCamelCase , dim=1 ) lowercase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: lowercase__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: lowercase__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i lowercase__ = logprob_i if labels is not None: if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order: out.index_copy_(0 , UpperCamelCase , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def UpperCamelCase__ (self : List[str] , UpperCamelCase : Tuple ): '''simple docstring''' if self.n_clusters == 0: lowercase__ = self._compute_logit(UpperCamelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(UpperCamelCase , dim=-1 ) else: # construct weights and biases lowercase__ ,lowercase__ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase__ ,lowercase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ = self.out_layers[0].weight[l_idx:r_idx] lowercase__ = self.out_layers[0].bias[l_idx:r_idx] else: lowercase__ = self.out_layers[i].weight lowercase__ = self.out_layers[i].bias if i == 0: lowercase__ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowercase__ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(UpperCamelCase ) biases.append(UpperCamelCase ) lowercase__ ,lowercase__ ,lowercase__ = weights[0], biases[0], self.out_projs[0] lowercase__ = self._compute_logit(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowercase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) ) lowercase__ = nn.functional.log_softmax(UpperCamelCase , dim=1 ) lowercase__ = [0] + self.cutoffs for i in range(len(UpperCamelCase ) - 1 ): lowercase__ ,lowercase__ = cutoff_values[i], cutoff_values[i + 1] if i == 0: lowercase__ = head_logprob[:, : self.cutoffs[0]] else: lowercase__ ,lowercase__ ,lowercase__ = weights[i], biases[i], self.out_projs[i] lowercase__ = self._compute_logit(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowercase__ = nn.functional.log_softmax(UpperCamelCase , dim=1 ) lowercase__ = head_logprob[:, -i] + tail_logprob_i lowercase__ = logprob_i return out
2
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
1
'''simple docstring''' import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger lowerCamelCase : Tuple = get_logger(__name__) class __lowerCAmelCase : '''simple docstring''' def __init__(self : Tuple , UpperCamelCase : Optional[str] = None ): '''simple docstring''' lowercase__ = ( os.path.join(UpperCamelCase , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) lowercase__ = Extractor def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" lowercase__ = os.path.abspath(UpperCamelCase ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase ) ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : str , UpperCamelCase : bool ): '''simple docstring''' return force_extract or ( not os.path.isfile(UpperCamelCase ) and not (os.path.isdir(UpperCamelCase ) and os.listdir(UpperCamelCase )) ) def UpperCamelCase__ (self : Dict , UpperCamelCase : str , UpperCamelCase : bool = False ): '''simple docstring''' lowercase__ = self.extractor.infer_extractor_format(UpperCamelCase ) if not extractor_format: return input_path lowercase__ = self._get_output_path(UpperCamelCase ) if self._do_extract(UpperCamelCase , UpperCamelCase ): self.extractor.extract(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return output_path class __lowerCAmelCase (lowercase_ ): '''simple docstring''' @classmethod @abstractmethod def UpperCamelCase__ (cls : Any , UpperCamelCase : Union[Path, str] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' ... @staticmethod @abstractmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' ... class __lowerCAmelCase (lowercase_ , lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[bytes] = [] @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : int ): '''simple docstring''' with open(UpperCamelCase , '''rb''' ) as f: return f.read(UpperCamelCase ) @classmethod def UpperCamelCase__ (cls : int , UpperCamelCase : Union[Path, str] , UpperCamelCase : bytes = b"" ): '''simple docstring''' if not magic_number: lowercase__ = max(len(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) try: lowercase__ = cls.read_magic_number(UpperCamelCase , UpperCamelCase ) except OSError: return False return any(magic_number.startswith(UpperCamelCase ) for cls_magic_number in cls.magic_numbers ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' @classmethod def UpperCamelCase__ (cls : Optional[int] , UpperCamelCase : Union[Path, str] , **UpperCamelCase : List[Any] ): '''simple docstring''' return tarfile.is_tarfile(UpperCamelCase ) @staticmethod def UpperCamelCase__ (UpperCamelCase : List[str] , UpperCamelCase : Optional[int] ): '''simple docstring''' def resolved(UpperCamelCase : str ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase ) ) def badpath(UpperCamelCase : str , UpperCamelCase : str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase , UpperCamelCase ) ).startswith(UpperCamelCase ) def badlink(UpperCamelCase : int , UpperCamelCase : str ) -> bool: # Links are interpreted relative to the directory containing the link lowercase__ = resolved(os.path.join(UpperCamelCase , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase ) lowercase__ = resolved(UpperCamelCase ) for finfo in members: if badpath(finfo.name , UpperCamelCase ): logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" ) elif finfo.issym() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" ) elif finfo.islnk() and badlink(UpperCamelCase , UpperCamelCase ): logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" ) else: yield finfo @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) lowercase__ = tarfile.open(UpperCamelCase ) tar_file.extractall(UpperCamelCase , members=TarExtractor.safemembers(UpperCamelCase , UpperCamelCase ) ) tar_file.close() class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = [B"""\x1F\x8B"""] @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' with gzip.open(UpperCamelCase , '''rb''' ) as gzip_file: with open(UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def UpperCamelCase__ (cls : Tuple , UpperCamelCase : Union[Path, str] , UpperCamelCase : bytes = b"" ): '''simple docstring''' if super().is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase , '''rb''' ) as fp: lowercase__ = _EndRecData(UpperCamelCase ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: lowercase__ = fp.read(UpperCamelCase ) # CD is where we expect it to be if len(UpperCamelCase ) == sizeCentralDir: lowercase__ = struct.unpack(UpperCamelCase , UpperCamelCase ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with zipfile.ZipFile(UpperCamelCase , '''r''' ) as zip_file: zip_file.extractall(UpperCamelCase ) zip_file.close() class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Dict = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' with lzma.open(UpperCamelCase ) as compressed_file: with open(UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Dict = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' if not config.RARFILE_AVAILABLE: raise ImportError('''Please pip install rarfile''' ) import rarfile os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) lowercase__ = rarfile.RarFile(UpperCamelCase ) rf.extractall(UpperCamelCase ) rf.close() class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' if not config.ZSTANDARD_AVAILABLE: raise ImportError('''Please pip install zstandard''' ) import zstandard as zstd lowercase__ = zstd.ZstdDecompressor() with open(UpperCamelCase , '''rb''' ) as ifh, open(UpperCamelCase , '''wb''' ) as ofh: dctx.copy_stream(UpperCamelCase , UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = [B"""\x42\x5A\x68"""] @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' with bza.open(UpperCamelCase , '''rb''' ) as compressed_file: with open(UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' if not config.PY7ZR_AVAILABLE: raise ImportError('''Please pip install py7zr''' ) import pyazr os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) with pyazr.SevenZipFile(UpperCamelCase , '''r''' ) as archive: archive.extractall(UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[Any] = [B"""\x04\x22\x4D\x18"""] @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] ): '''simple docstring''' if not config.LZ4_AVAILABLE: raise ImportError('''Please pip install lz4''' ) import lza.frame with lza.frame.open(UpperCamelCase , '''rb''' ) as compressed_file: with open(UpperCamelCase , '''wb''' ) as extracted_file: shutil.copyfileobj(UpperCamelCase , UpperCamelCase ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def UpperCamelCase__ (cls : Optional[Any] ): '''simple docstring''' return max( len(UpperCamelCase ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase , UpperCamelCase ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def UpperCamelCase__ (UpperCamelCase : Union[Path, str] , UpperCamelCase : int ): '''simple docstring''' try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase , magic_number_length=UpperCamelCase ) except OSError: return b"" @classmethod def UpperCamelCase__ (cls : Any , UpperCamelCase : Union[Path, str] , UpperCamelCase : bool = False ): '''simple docstring''' warnings.warn( '''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'infer_extractor_format\' instead.''' , category=UpperCamelCase , ) lowercase__ = cls.infer_extractor_format(UpperCamelCase ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def UpperCamelCase__ (cls : int , UpperCamelCase : Union[Path, str] ): # <Added version="2.4.0"/> '''simple docstring''' lowercase__ = cls._get_magic_number_max_length() lowercase__ = cls._read_magic_number(UpperCamelCase , UpperCamelCase ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase , magic_number=UpperCamelCase ): return extractor_format @classmethod def UpperCamelCase__ (cls : Optional[Any] , UpperCamelCase : Union[Path, str] , UpperCamelCase : Union[Path, str] , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[BaseExtractor] = "deprecated" , ): '''simple docstring''' os.makedirs(os.path.dirname(UpperCamelCase ) , exist_ok=UpperCamelCase ) # Prevent parallel extractions lowercase__ = str(Path(UpperCamelCase ).with_suffix('''.lock''' ) ) with FileLock(UpperCamelCase ): shutil.rmtree(UpperCamelCase , ignore_errors=UpperCamelCase ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase , UpperCamelCase ): # passed as positional arg warnings.warn( '''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. ''' '''Use \'extractor_format\' instead.''' , category=UpperCamelCase , ) lowercase__ = extractor if extractor != '''deprecated''' else extractor_format else: lowercase__ = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase , UpperCamelCase ) else: warnings.warn( '''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an ''' '''exception in 3.0.0.''' , category=UpperCamelCase , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase ): return extractor.extract(UpperCamelCase , UpperCamelCase )
2
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCamelCase : str = parser.parse_args() if args.model_type == "bert": lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name) lowerCamelCase : Any = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCamelCase : int = model.state_dict() lowerCamelCase : int = {} for w in ["word_embeddings", "position_embeddings"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] lowerCamelCase : Tuple = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] lowerCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] lowerCamelCase : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] lowerCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] lowerCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] lowerCamelCase : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight'] lowerCamelCase : str = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""] lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
2
1
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A = None ) -> list[list[str]]: """simple docstring""" lowercase__ = word_bank or [] # create a table lowercase__ = len(A ) + 1 lowercase__ = [] for _ in range(A ): table.append([] ) # seed value lowercase__ = [[]] # because empty string has empty combination # iterate through the indices for i in range(A ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(A )] == word: lowercase__ = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(A )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(A )]: combination.reverse() return table[len(A )] if __name__ == "__main__": print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa'])) print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't'])) print( all_construct( 'hexagonosaurus', ['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'], ) )
2
'''simple docstring''' from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ): '''simple docstring''' lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase : Dict = { 'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'], 'processing_git': ['GitProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'GIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'GitForCausalLM', 'GitModel', 'GitPreTrainedModel', 'GitVisionModel', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Tuple = """cvt""" def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) lowercase__ = num_channels lowercase__ = patch_sizes lowercase__ = patch_stride lowercase__ = patch_padding lowercase__ = embed_dim lowercase__ = num_heads lowercase__ = depth lowercase__ = mlp_ratio lowercase__ = attention_drop_rate lowercase__ = drop_rate lowercase__ = drop_path_rate lowercase__ = qkv_bias lowercase__ = cls_token lowercase__ = qkv_projection_method lowercase__ = kernel_qkv lowercase__ = padding_kv lowercase__ = stride_kv lowercase__ = padding_q lowercase__ = stride_q lowercase__ = initializer_range lowercase__ = layer_norm_eps
2
1
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" lowercase__ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] lowercase__ = True if '''large''' in model_name or '''huge''' in model_name else False lowercase__ = True if '''large''' in model_name or '''huge''' in model_name else False lowercase__ = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: lowercase__ = [3, 3, 3, 3] lowercase__ = [5, 5, 5, 5] elif "fl4" in model_name: lowercase__ = [4, 4, 4, 4] lowercase__ = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: lowercase__ = [3, 3, 3, 3] if "lrf" in model_name: lowercase__ = [3, 3, 3, 3] else: lowercase__ = [2, 2, 2, 2] if "tiny" in model_name: lowercase__ = 96 elif "small" in model_name: lowercase__ = 96 elif "base" in model_name: lowercase__ = 128 elif "large" in model_name: lowercase__ = 192 elif "xlarge" in model_name: lowercase__ = 256 elif "huge" in model_name: lowercase__ = 352 # set label information lowercase__ = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: lowercase__ = '''imagenet-22k-id2label.json''' else: lowercase__ = '''imagenet-1k-id2label.json''' lowercase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(A ): v for k, v in idalabel.items()} lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = FocalNetConfig( embed_dim=A , depths=A , focal_levels=A , focal_windows=A , use_conv_embed=A , idalabel=A , labelaid=A , use_post_layernorm=A , use_layerscale=A , ) return config def _SCREAMING_SNAKE_CASE (A ) -> Dict: """simple docstring""" if "patch_embed.proj" in name: lowercase__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: lowercase__ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: lowercase__ = '''encoder.''' + name if "encoder.layers" in name: lowercase__ = name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: lowercase__ = name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: lowercase__ = name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: lowercase__ = name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: lowercase__ = name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: lowercase__ = name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": lowercase__ = '''layernorm.weight''' if name == "norm.bias": lowercase__ = '''layernorm.bias''' if "head" in name: lowercase__ = name.replace('''head''' , '''classifier''' ) else: lowercase__ = '''focalnet.''' + name return name def _SCREAMING_SNAKE_CASE (A , A , A=False ) -> Any: """simple docstring""" lowercase__ = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on lowercase__ = model_name_to_url[model_name] print('''Checkpoint URL: ''' , A ) lowercase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): lowercase__ = state_dict.pop(A ) lowercase__ = val lowercase__ = get_focalnet_config(A ) lowercase__ = FocalNetForImageClassification(A ) model.eval() # load state dict model.load_state_dict(A ) # verify conversion lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = BitImageProcessor( do_resize=A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=A , crop_size=224 , do_normalize=A , image_mean=A , image_std=A , ) lowercase__ = Image.open(requests.get(A , stream=A ).raw ) lowercase__ = processor(images=A , return_tensors='''pt''' ) lowercase__ = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowercase__ = image_transforms(A ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , A , atol=1E-4 ) lowercase__ = model(**A ) lowercase__ = outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": lowercase__ = torch.tensor([0.2_166, -0.4_368, 0.2_191] ) elif model_name == "focalnet-tiny-lrf": lowercase__ = torch.tensor([1.1_669, 0.0_125, -0.1_695] ) elif model_name == "focalnet-small": lowercase__ = torch.tensor([0.4_917, -0.0_430, 0.1_341] ) elif model_name == "focalnet-small-lrf": lowercase__ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] ) elif model_name == "focalnet-base": lowercase__ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] ) elif model_name == "focalnet-base-lrf": lowercase__ = torch.tensor([0.5_306, -0.0_483, -0.3_928] ) assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(A ) processor.save_pretrained(A ) if push_to_hub: print(f"Pushing model and processor of {model_name} to the hub..." ) model.push_to_hub(f"{model_name}" ) processor.push_to_hub(f"{model_name}" ) if __name__ == "__main__": lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) lowerCamelCase : Union[str, Any] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
2
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowerCamelCase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) lowerCamelCase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowerCamelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowerCamelCase : str = np.expand_dims(test_image, axis=0) lowerCamelCase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowerCamelCase : Any = 'Normal' if result[0][0] == 1: lowerCamelCase : Any = 'Abnormality detected'
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase : List[Any] = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = ['PLBartTokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[Any] = [ 'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST', 'PLBartForCausalLM', 'PLBartForConditionalGeneration', 'PLBartForSequenceClassification', 'PLBartModel', 'PLBartPreTrainedModel', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys lowerCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
2
'''simple docstring''' class __lowerCAmelCase : # Public class to implement a graph '''simple docstring''' def __init__(self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = row lowercase__ = col lowercase__ = graph def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): # And finally, count all islands. '''simple docstring''' lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += 1 return count
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase : List[Any] = { 'configuration_longformer': [ 'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongformerConfig', 'LongformerOnnxConfig', ], 'tokenization_longformer': ['LongformerTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : str = ['LongformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : str = [ 'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongformerForMaskedLM', 'LongformerForMultipleChoice', 'LongformerForQuestionAnswering', 'LongformerForSequenceClassification', 'LongformerForTokenClassification', 'LongformerModel', 'LongformerPreTrainedModel', 'LongformerSelfAttention', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : str = [ 'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLongformerForMaskedLM', 'TFLongformerForMultipleChoice', 'TFLongformerForQuestionAnswering', 'TFLongformerForSequenceClassification', 'TFLongformerForTokenClassification', 'TFLongformerModel', 'TFLongformerPreTrainedModel', 'TFLongformerSelfAttention', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') lowerCamelCase : Union[str, Any] = int(input('Enter number: ').strip()) print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" return len(set(A ) ) == len(A ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowerCamelCase : Optional[int] = ['bert-base-uncased', 'bert-base-cased'] lowerCamelCase : Union[str, Any] = 'hf-internal-testing/tiny-bert-tf-only' if is_tf_available(): class __lowerCAmelCase (tf.keras.Model ): '''simple docstring''' def __init__(self : List[str] , UpperCamelCase : Optional[Any] ): '''simple docstring''' super().__init__() lowercase__ = tokenizer lowercase__ = AutoConfig.from_pretrained(UpperCamelCase ) lowercase__ = TFAutoModel.from_config(UpperCamelCase ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' lowercase__ = self.tokenizer(UpperCamelCase ) lowercase__ = self.bert(**UpperCamelCase ) return out["pooler_output"] @require_tf @require_tensorflow_text class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() lowercase__ = [ BertTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false lowercase__ = [TFBertTokenizer.from_pretrained(UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(UpperCamelCase , use_fast_bert_tokenizer=UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowercase__ = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] lowercase__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): lowercase__ = tokenizer(UpperCamelCase , return_tensors='''tf''' , padding='''longest''' ) lowercase__ = tf_tokenizer(UpperCamelCase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase__ = tf_tokenizer(self.paired_sentences ) lowercase__ = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def UpperCamelCase__ (self : Any ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase__ = tf.function(UpperCamelCase ) for test_inputs in (self.test_sentences, self.paired_sentences): lowercase__ = tf.constant(UpperCamelCase ) lowercase__ = compiled_tokenizer(UpperCamelCase ) lowercase__ = tf_tokenizer(UpperCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def UpperCamelCase__ (self : Tuple ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase__ = ModelToSave(tokenizer=UpperCamelCase ) lowercase__ = tf.convert_to_tensor(self.test_sentences ) lowercase__ = model(UpperCamelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowercase__ = Path(UpperCamelCase ) / '''saved.model''' model.save(UpperCamelCase ) lowercase__ = tf.keras.models.load_model(UpperCamelCase ) lowercase__ = loaded_model(UpperCamelCase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
2
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase : Any = None lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : List[str] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase : Any = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""] lowerCAmelCase__ : Optional[int] = TaTokenizer lowerCAmelCase__ : List[int] = [] def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True lowercase__ = extra_ids @staticmethod def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , ) return max_model_length def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowercase__ = os.path.join( UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) logger.info(f"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return list( set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
2
1
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[int]: """simple docstring""" lowercase__ = nn.functional.normalize(A ) lowercase__ = nn.functional.normalize(A ) return torch.mm(A , normalized_text_embeds.t() ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = CLIPConfig lowerCAmelCase__ : Union[str, Any] = ["""CLIPEncoderLayer"""] def __init__(self : List[str] , UpperCamelCase : CLIPConfig ): '''simple docstring''' super().__init__(UpperCamelCase ) lowercase__ = CLIPVisionModel(config.vision_config ) lowercase__ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=UpperCamelCase ) lowercase__ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=UpperCamelCase ) lowercase__ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=UpperCamelCase ) lowercase__ = nn.Parameter(torch.ones(17 ) , requires_grad=UpperCamelCase ) lowercase__ = nn.Parameter(torch.ones(3 ) , requires_grad=UpperCamelCase ) @torch.no_grad() def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : int ): '''simple docstring''' lowercase__ = self.vision_model(UpperCamelCase )[1] # pooled_output lowercase__ = self.visual_projection(UpperCamelCase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowercase__ = cosine_distance(UpperCamelCase , self.special_care_embeds ).cpu().float().numpy() lowercase__ = cosine_distance(UpperCamelCase , self.concept_embeds ).cpu().float().numpy() lowercase__ = [] lowercase__ = image_embeds.shape[0] for i in range(UpperCamelCase ): lowercase__ = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images lowercase__ = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): lowercase__ = special_cos_dist[i][concept_idx] lowercase__ = self.special_care_embeds_weights[concept_idx].item() lowercase__ = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} ) lowercase__ = 0.01 for concept_idx in range(len(cos_dist[0] ) ): lowercase__ = cos_dist[i][concept_idx] lowercase__ = self.concept_embeds_weights[concept_idx].item() lowercase__ = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(UpperCamelCase ) result.append(UpperCamelCase ) lowercase__ = [len(res['''bad_concepts'''] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def UpperCamelCase__ (self : Tuple , UpperCamelCase : torch.FloatTensor , UpperCamelCase : torch.FloatTensor ): '''simple docstring''' lowercase__ = self.vision_model(UpperCamelCase )[1] # pooled_output lowercase__ = self.visual_projection(UpperCamelCase ) lowercase__ = cosine_distance(UpperCamelCase , self.special_care_embeds ) lowercase__ = cosine_distance(UpperCamelCase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images lowercase__ = 0.0 lowercase__ = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) lowercase__ = torch.any(special_scores > 0 , dim=1 ) lowercase__ = special_care * 0.01 lowercase__ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) lowercase__ = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) lowercase__ = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
2
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ShapEImgaImgPipeline lowerCAmelCase__ : List[str] = ["""image"""] lowerCAmelCase__ : Any = ["""image"""] lowerCAmelCase__ : Any = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowerCAmelCase__ : Tuple = False @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : str ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return 8 @property def UpperCamelCase__ (self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ = CLIPVisionModel(UpperCamelCase ) return model @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def UpperCamelCase__ (self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowercase__ = PriorTransformer(**UpperCamelCase ) return model @property def UpperCamelCase__ (self : int ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**UpperCamelCase ) return model def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.dummy_prior lowercase__ = self.dummy_image_encoder lowercase__ = self.dummy_image_processor lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , ) lowercase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = torch_device == '''cpu''' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) lowercase__ = pipe( UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase : List[Any] = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : str = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : str = field( metadata={"""help""": """The output directory where the model will be written."""} , ) lowerCAmelCase__ : str = field( metadata={ """help""": ( """The encoder model checkpoint for weights initialization.""" """Don't set if you want to train an encoder model from scratch.""" ) } , ) lowerCAmelCase__ : str = field( metadata={ """help""": ( """The decoder model checkpoint for weights initialization.""" """Don't set if you want to train a decoder model from scratch.""" ) } , ) lowerCAmelCase__ : Optional[str] = field( default=lowercase_ , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} ) lowerCAmelCase__ : Optional[str] = field( default=lowercase_ , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = HfArgumentParser((ModelArguments,) ) ((lowercase__) ,) = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: lowercase__ = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: lowercase__ = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: lowercase__ = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: lowercase__ = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed lowercase__ = True lowercase__ = True lowercase__ = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=A , decoder_config=A , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens lowercase__ = decoder_config.decoder_start_token_id lowercase__ = decoder_config.pad_token_id if decoder_start_token_id is None: lowercase__ = decoder_config.bos_token_id if pad_token_id is None: lowercase__ = decoder_config.eos_token_id # This is necessary to make Flax's generate() work lowercase__ = decoder_config.eos_token_id lowercase__ = decoder_start_token_id lowercase__ = pad_token_id lowercase__ = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) lowercase__ = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) lowercase__ = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : List[Any] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = """realm""" def __init__(self : str , UpperCamelCase : List[Any]=30522 , UpperCamelCase : List[Any]=768 , UpperCamelCase : int=128 , UpperCamelCase : Any=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Union[str, Any]=3072 , UpperCamelCase : List[str]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=512 , UpperCamelCase : Dict=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[Any]=1E-12 , UpperCamelCase : Dict=256 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : Optional[int]=1E-3 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[int]=320 , UpperCamelCase : List[str]=13353718 , UpperCamelCase : Optional[Any]=5000 , UpperCamelCase : str=1 , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : List[Any]=2 , **UpperCamelCase : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) # Common config lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = retriever_proj_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = num_candidates lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = type_vocab_size lowercase__ = layer_norm_eps # Reader config lowercase__ = span_hidden_size lowercase__ = max_span_width lowercase__ = reader_layer_norm_eps lowercase__ = reader_beam_size lowercase__ = reader_seq_len # Retrieval config lowercase__ = num_block_records lowercase__ = searcher_beam_size
2
1
'''simple docstring''' import requests lowerCamelCase : Dict = '' # <-- Put your OpenWeatherMap appid here! lowerCamelCase : int = 'https://api.openweathermap.org/data/2.5/' def _SCREAMING_SNAKE_CASE (A = "Chicago" , A = APPID ) -> dict: """simple docstring""" return requests.get(URL_BASE + '''weather''' , params=locals() ).json() def _SCREAMING_SNAKE_CASE (A = "Kolkata, India" , A = APPID ) -> dict: """simple docstring""" return requests.get(URL_BASE + '''forecast''' , params=locals() ).json() def _SCREAMING_SNAKE_CASE (A = 55.68 , A = 12.57 , A = APPID ) -> dict: """simple docstring""" return requests.get(URL_BASE + '''onecall''' , params=locals() ).json() if __name__ == "__main__": from pprint import pprint while True: lowerCamelCase : int = input('Enter a location:').strip() if location: pprint(current_weather(location)) else: break
2
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : int = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = """mvp""" lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""] lowerCAmelCase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self : Any , UpperCamelCase : Optional[int]=50267 , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=12 , UpperCamelCase : Tuple=4096 , UpperCamelCase : Dict=16 , UpperCamelCase : int=12 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Union[str, Any]=1024 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=1 , UpperCamelCase : int=0 , UpperCamelCase : int=2 , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=False , UpperCamelCase : int=100 , UpperCamelCase : Optional[Any]=800 , **UpperCamelCase : str , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = classifier_dropout lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = use_prompt lowercase__ = prompt_length lowercase__ = prompt_mid_dim super().__init__( pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ): lowercase__ = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " '''The config can simply be saved and uploaded again to be fixed.''' )
2
1
'''simple docstring''' import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate lowerCamelCase : str = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('', '|', '|'), datarow=DataRow('', '|', '|'), padding=1, with_header_hide=None, ) lowerCamelCase : int = [] lowerCamelCase : Optional[int] = [] lowerCamelCase : Tuple = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}} lowerCamelCase : Union[str, Any] = [ { 'type': 'header', 'text': { 'type': 'plain_text', 'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""", 'emoji': True, }, } ] lowerCamelCase : Any = 0 for log in Path().glob('*.log'): lowerCamelCase : int = 0 with open(log, 'r') as f: for line in f: lowerCamelCase : Optional[Any] = json.loads(line) if line.get('nodeid', '') != "": lowerCamelCase : Optional[int] = line['nodeid'] if line.get('duration', None) is not None: lowerCamelCase : Any = f"""{line['duration']:.4f}""" if line.get('outcome', '') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) lowerCamelCase : Optional[int] = [] log.unlink() lowerCamelCase : Optional[int] = '' lowerCamelCase : int = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += f"*{name[1:]}: {num_failed} failed test*\n" else: message += f"*{name[1:]}: {num_failed} failed tests*\n" lowerCamelCase : str = [] lowerCamelCase : List[str] = {} for test in failed_tests: lowerCamelCase : Dict = test[0].split('::') lowerCamelCase : Optional[int] = data[0].split('/')[-1] if data[0] not in filesafailed: lowerCamelCase : Optional[Any] = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) lowerCamelCase : Any = [test[0] for test in failed_table] lowerCamelCase : List[Any] = list(set(files)) # Count number of instances in failed_tests lowerCamelCase : List[str] = [] for file in individual_files: table.append([file, len(filesafailed[file])]) lowerCamelCase : str = tabulate( table, headers=['Test Location', 'Num Failed'], tablefmt=hf_table_format, stralign='right', ) message += f"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 3_000: lowerCamelCase : Dict = 'Too many failed tests, please see the full report in the Action results.' lowerCamelCase : List[Any] = len(err) + 10 lowerCamelCase : List[Any] = message[: 3_000 - offset] + f"""\n...\n```\n{err}""" print(f"""### {message}""") else: lowerCamelCase : Optional[int] = 'No failed tests! 🤗' print(f"""## {message}""") payload.append(no_error_payload) if os.environ.get('TEST_TYPE', '') != "": from slack_sdk import WebClient lowerCamelCase : Optional[int] = WebClient(token=os.environ['SLACK_API_TOKEN']) if message != "No failed tests! 🤗": lowerCamelCase : str = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': message, }, } payload.append(md_report) lowerCamelCase : Dict = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': '*For more details:*', }, 'accessory': { 'type': 'button', 'text': { 'type': 'plain_text', 'text': 'Check Action results', 'emoji': True, }, 'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""", }, } payload.append(action_button) lowerCamelCase : Tuple = { 'type': 'context', 'elements': [ { 'type': 'plain_text', 'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""", } ], } payload.append(date_report) lowerCamelCase : Any = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload) lowerCamelCase : Tuple = response.data['ts'] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name lowerCamelCase : Any = '' for i, row in enumerate(test_failures): if row[0] != test_class: lowerCamelCase : Optional[Any] = row[0] else: lowerCamelCase : int = '' lowerCamelCase : Optional[int] = { 'type': 'section', 'text': { 'type': 'mrkdwn', 'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""", }, } client.chat_postMessage( channel='#accelerate-ci-daily', thread_ts=ts, blocks=[payload], )
2
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = DebertaVaTokenizer lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast lowerCAmelCase__ : str = True lowerCAmelCase__ : Tuple = True def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = '''this is a test''' lowercase__ = '''this is a test''' return input_text, output_text def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''<pad>''' lowercase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(UpperCamelCase ) , 30001 ) def UpperCamelCase__ (self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''This is a test''' lowercase__ = [13, 1, 4398, 25, 21, 1289] lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # fmt: off lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DebertaVaTokenizer(UpperCamelCase ) lowercase__ = tokenizer.encode('''sequence builders''' ) lowercase__ = tokenizer.encode('''multi-sequence build''' ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings lowerCamelCase : Tuple = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n' @add_start_docstrings(lowercase_ ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = """rag""" lowerCAmelCase__ : List[Any] = True def __init__(self : Dict , UpperCamelCase : List[Any]=None , UpperCamelCase : str=True , UpperCamelCase : List[Any]=None , UpperCamelCase : List[str]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : str=None , UpperCamelCase : List[Any]=None , UpperCamelCase : str=" / " , UpperCamelCase : Union[str, Any]=" // " , UpperCamelCase : List[str]=5 , UpperCamelCase : Tuple=300 , UpperCamelCase : Optional[int]=768 , UpperCamelCase : int=8 , UpperCamelCase : str="wiki_dpr" , UpperCamelCase : Optional[Any]="train" , UpperCamelCase : Any="compressed" , UpperCamelCase : Dict=None , UpperCamelCase : List[Any]=None , UpperCamelCase : List[Any]=False , UpperCamelCase : str=False , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : str=True , UpperCamelCase : int=False , UpperCamelCase : Any=False , UpperCamelCase : Any=False , UpperCamelCase : List[str]=True , UpperCamelCase : Optional[int]=None , **UpperCamelCase : List[Any] , ): '''simple docstring''' super().__init__( bos_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , prefix=UpperCamelCase , vocab_size=UpperCamelCase , **UpperCamelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" lowercase__ = kwargs.pop('''question_encoder''' ) lowercase__ = question_encoder_config.pop('''model_type''' ) lowercase__ = kwargs.pop('''generator''' ) lowercase__ = decoder_config.pop('''model_type''' ) from ..auto.configuration_auto import AutoConfig lowercase__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) lowercase__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) lowercase__ = reduce_loss lowercase__ = label_smoothing lowercase__ = exclude_bos_score lowercase__ = do_marginalize lowercase__ = title_sep lowercase__ = doc_sep lowercase__ = n_docs lowercase__ = max_combined_length lowercase__ = dataset lowercase__ = dataset_split lowercase__ = index_name lowercase__ = retrieval_vector_size lowercase__ = retrieval_batch_size lowercase__ = passages_path lowercase__ = index_path lowercase__ = use_dummy_dataset lowercase__ = output_retrieved lowercase__ = do_deduplication lowercase__ = use_cache if self.forced_eos_token_id is None: lowercase__ = getattr(self.generator , '''forced_eos_token_id''' , UpperCamelCase ) @classmethod def UpperCamelCase__ (cls : Optional[int] , UpperCamelCase : PretrainedConfig , UpperCamelCase : PretrainedConfig , **UpperCamelCase : int ): '''simple docstring''' return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = copy.deepcopy(self.__dict__ ) lowercase__ = self.question_encoder.to_dict() lowercase__ = self.generator.to_dict() lowercase__ = self.__class__.model_type return output
2
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]: """simple docstring""" lowercase__ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE (A ) -> List[str]: """simple docstring""" lowercase__ ,lowercase__ = emb.weight.shape lowercase__ = nn.Linear(A , A , bias=A ) lowercase__ = emb.weight.data return lin_layer def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]: """simple docstring""" lowercase__ = torch.load(A , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A ) lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A ) if mbart_aa and finetuned: lowercase__ = '''relu''' lowercase__ = state_dict['''decoder.embed_tokens.weight'''] lowercase__ = MBartForConditionalGeneration(A ) model.model.load_state_dict(A ) if finetuned: lowercase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') lowerCamelCase : Any = parser.parse_args() lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
2
1
'''simple docstring''' lowerCamelCase : Dict = 65_521 def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" lowercase__ = 1 lowercase__ = 0 for plain_chr in plain_text: lowercase__ = (a + ord(A )) % MOD_ADLER lowercase__ = (b + a) % MOD_ADLER return (b << 16) | a
2
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCamelCase : List[Any] = logging.getLogger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ): '''simple docstring''' lowercase__ = label_idx def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: lowercase__ = [] lowercase__ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 lowercase__ = [] lowercase__ = [] else: lowercase__ = line.split(''' ''' ) words.append(splits[0] ) if len(UpperCamelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) return examples def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(UpperCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(UpperCamelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] ): '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(UpperCamelCase ): lowercase__ = [] lowercase__ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(UpperCamelCase ) == len(UpperCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 return examples def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for sentence in parse_incr(UpperCamelCase ): lowercase__ = preds_list[example_id] lowercase__ = '''''' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(UpperCamelCase ) example_id += 1 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
2
1
'''simple docstring''' import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''ylacombe/bark-small''' lowercase__ = tempfile.mkdtemp() lowercase__ = '''en_speaker_1''' lowercase__ = '''This is a test string''' lowercase__ = '''speaker_embeddings_path.json''' lowercase__ = '''speaker_embeddings''' def UpperCamelCase__ (self : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = BarkProcessor(tokenizer=UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) lowercase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowercase__ = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) lowercase__ = 35 lowercase__ = 2 lowercase__ = 8 lowercase__ = { '''semantic_prompt''': np.ones(UpperCamelCase ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset lowercase__ = processor(text=self.input_string , voice_preset=UpperCamelCase ) lowercase__ = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file lowercase__ = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(UpperCamelCase , **UpperCamelCase ) lowercase__ = processor(text=self.input_string , voice_preset=UpperCamelCase ) lowercase__ = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset ) def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = BarkProcessor(tokenizer=UpperCamelCase ) lowercase__ = processor(text=self.input_string ) lowercase__ = tokenizer( self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=UpperCamelCase , return_attention_mask=UpperCamelCase , return_token_type_ids=UpperCamelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = """megatron-bert""" def __init__(self : Tuple , UpperCamelCase : Optional[int]=29056 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=24 , UpperCamelCase : int=16 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Optional[int]="absolute" , UpperCamelCase : List[Any]=True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache
2
1
'''simple docstring''' import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : Dict = { 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json', # See all BART models at https://huggingface.co/models?filter=bart } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Tuple = """bart""" lowerCAmelCase__ : Any = ["""past_key_values"""] lowerCAmelCase__ : Dict = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self : Any , UpperCamelCase : List[str]=50265 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=12 , UpperCamelCase : Dict=4096 , UpperCamelCase : Any=16 , UpperCamelCase : Tuple=12 , UpperCamelCase : Dict=4096 , UpperCamelCase : int=16 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : str="gelu" , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=0.1 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : Dict=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : int=False , UpperCamelCase : List[str]=True , UpperCamelCase : Dict=3 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : str=0 , UpperCamelCase : List[Any]=2 , UpperCamelCase : int=True , UpperCamelCase : Any=2 , UpperCamelCase : Optional[int]=2 , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = classifier_dropout lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ): lowercase__ = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " '''The config can simply be saved and uploaded again to be fixed.''' ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowercase__ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowercase__ = {0: '''batch'''} lowercase__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowercase__ = {0: '''batch''', 1: '''decoder_sequence'''} lowercase__ = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. lowercase__ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowercase__ ,lowercase__ = self.num_layers for i in range(UpperCamelCase ): lowercase__ = {0: '''batch''', 2: '''past_sequence + sequence'''} lowercase__ = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowercase__ = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def UpperCamelCase__ (self : Any ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowercase__ = super().outputs else: lowercase__ = super(UpperCamelCase , self ).outputs if self.use_past: lowercase__ ,lowercase__ = self.num_layers for i in range(UpperCamelCase ): lowercase__ = {0: '''batch''', 2: '''past_sequence + sequence'''} lowercase__ = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def UpperCamelCase__ (self : int , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Generate decoder inputs lowercase__ = seq_length if not self.use_past else 1 lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) lowercase__ = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} lowercase__ = dict(**UpperCamelCase , **UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__ ,lowercase__ = common_inputs['''input_ids'''].shape lowercase__ = common_inputs['''decoder_input_ids'''].shape[1] lowercase__ ,lowercase__ = self.num_attention_heads lowercase__ = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ = decoder_seq_length + 3 lowercase__ = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowercase__ = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(UpperCamelCase , UpperCamelCase )] , dim=1 ) lowercase__ = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowercase__ ,lowercase__ = self.num_layers lowercase__ = min(UpperCamelCase , UpperCamelCase ) lowercase__ = max(UpperCamelCase , UpperCamelCase ) - min_num_layers lowercase__ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(UpperCamelCase ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase ), ) ) # TODO: test this. lowercase__ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(UpperCamelCase , UpperCamelCase ): common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) ) return common_inputs def UpperCamelCase__ (self : int , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__ ,lowercase__ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__ = seqlen + 2 lowercase__ ,lowercase__ = self.num_layers lowercase__ ,lowercase__ = self.num_attention_heads lowercase__ = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ = common_inputs['''attention_mask'''].dtype lowercase__ = torch.cat( [common_inputs['''attention_mask'''], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 ) lowercase__ = [ (torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(UpperCamelCase ) ] return common_inputs def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowercase__ = compute_effective_axis_dimension( UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase__ = tokenizer.num_special_tokens_to_add(UpperCamelCase ) lowercase__ = compute_effective_axis_dimension( UpperCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence lowercase__ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowercase__ = dict(tokenizer(UpperCamelCase , return_tensors=UpperCamelCase ) ) return common_inputs def UpperCamelCase__ (self : int , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowercase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) elif self.task == "causal-lm": lowercase__ = self._generate_dummy_inputs_for_causal_lm( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) else: lowercase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) return common_inputs def UpperCamelCase__ (self : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: lowercase__ = super()._flatten_past_key_values_(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) else: lowercase__ = super(UpperCamelCase , self )._flatten_past_key_values_( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
2
'''simple docstring''' # Lint as: python3 import itertools import os import re lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])') lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])') lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)') lowerCamelCase : List[Any] = re.compile(R'(_{2,})') lowerCamelCase : str = R'^\w+(\.\w+)*$' lowerCamelCase : Dict = R'<>:/\|?*' def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A ) lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A ) return name.lower() def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" lowercase__ = _single_underscore_re.split(A ) lowercase__ = [_multiple_underscores_re.split(A ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' ) def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , A ): raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." ) return f"{filename_prefix_for_name(A )}-{split}" def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) if filetype_suffix: prefix += f".{filetype_suffix}" lowercase__ = os.path.join(A , A ) return f"{filepath}*" def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) lowercase__ = os.path.join(A , A ) if shard_lengths: lowercase__ = len(A ) lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )] if filetype_suffix: lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames] return filenames else: lowercase__ = prefix if filetype_suffix: filename += f".{filetype_suffix}" return [filename]
2
1
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __lowerCAmelCase : '''simple docstring''' def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = decoder_seq_length # For common tests lowercase__ = self.decoder_seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_model lowercase__ = decoder_layers lowercase__ = decoder_layers lowercase__ = decoder_ffn_dim lowercase__ = decoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = eos_token_id lowercase__ = bos_token_id lowercase__ = pad_token_id lowercase__ = decoder_start_token_id lowercase__ = use_cache lowercase__ = max_position_embeddings lowercase__ = None lowercase__ = decoder_seq_length lowercase__ = 2 lowercase__ = 1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ): '''simple docstring''' lowercase__ = True lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval() lowercase__ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 ) lowercase__ = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ = model(UpperCamelCase )['''last_hidden_state'''] lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state'''] # select random slice lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowercase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else () lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} lowerCAmelCase__ : Optional[Any] = True lowerCAmelCase__ : List[str] = False def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass
2
1
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' lowercase__ = Image.open(requests.get(A , stream=A ).raw ).convert('''RGB''' ) return image def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" lowercase__ = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) ) rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") ) rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def _SCREAMING_SNAKE_CASE (A , A , A ) -> Tuple: """simple docstring""" lowercase__ = dct.pop(A ) lowercase__ = val def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowercase__ = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias" ) lowercase__ = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias" ) # next, set bias in the state dict lowercase__ = torch.cat((q_bias, torch.zeros_like(A , requires_grad=A ), v_bias) ) lowercase__ = qkv_bias def _SCREAMING_SNAKE_CASE (A ) -> List[Any]: """simple docstring""" lowercase__ = 364 if '''coco''' in model_name else 224 lowercase__ = InstructBlipVisionConfig(image_size=A ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowercase__ = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=32_001 ).to_dict() elif "vicuna-13b" in model_name: lowercase__ = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=32_001 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 lowercase__ = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict() lowercase__ = InstructBlipConfig(vision_config=A , text_config=A , qformer_config=A ) return config, image_size @torch.no_grad() def _SCREAMING_SNAKE_CASE (A , A=None , A=False ) -> Optional[Any]: """simple docstring""" lowercase__ = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: lowercase__ = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) lowercase__ = LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) lowercase__ ,lowercase__ = get_blipa_config(A ) lowercase__ = InstructBlipForConditionalGeneration(A ).eval() lowercase__ = { '''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } lowercase__ ,lowercase__ = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowercase__ = '''cuda:1''' if torch.cuda.is_available() else '''cpu''' lowercase__ = '''cuda:2''' if torch.cuda.is_available() else '''cpu''' lowercase__ ,lowercase__ ,lowercase__ = load_model_and_preprocess( name=A , model_type=A , is_eval=A , device=A ) original_model.eval() print('''Done!''' ) # update state dict keys lowercase__ = original_model.state_dict() lowercase__ = create_rename_keys(A ) for src, dest in rename_keys: rename_key(A , A , A ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowercase__ = state_dict.pop(A ) if key.startswith('''Qformer.bert''' ): lowercase__ = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowercase__ = key.replace('''self''' , '''attention''' ) if "llm_proj" in key: lowercase__ = key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: lowercase__ = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): lowercase__ = key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): lowercase__ = key.replace('''t5''' , '''language''' ) lowercase__ = val # read in qv biases read_in_q_v_bias(A , A ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(A , strict=A ) lowercase__ = load_demo_image() lowercase__ = '''What is unusual about this image?''' # create processor lowercase__ = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=A , image_std=A ) lowercase__ = InstructBlipProcessor( image_processor=A , tokenizer=A , qformer_tokenizer=A , ) lowercase__ = processor(images=A , text=A , return_tensors='''pt''' ).to(A ) # make sure processor creates exact same pixel values lowercase__ = vis_processors['''eval'''](A ).unsqueeze(0 ).to(A ) lowercase__ = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , A ) original_model.to(A ) hf_model.to(A ) with torch.no_grad(): if "vicuna" in model_name: lowercase__ = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits lowercase__ = hf_model(**A ).logits else: lowercase__ = original_model( {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits lowercase__ = tokenizer('''\n''' , return_tensors='''pt''' ).input_ids.to(A ) lowercase__ = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) lowercase__ = hf_model(**A , labels=A ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape lowercase__ = 1E-4 if '''vicuna''' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , A , atol=A ) print('''Looks ok!''' ) print('''Generating with original model...''' ) lowercase__ = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) lowercase__ = hf_model.generate( **A , do_sample=A , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? lowercase__ = 2 print('''Original generation:''' , A ) lowercase__ = processor.batch_decode(A , skip_special_tokens=A ) lowercase__ = [text.strip() for text in output_text] print('''HF generation:''' , A ) if pytorch_dump_folder_path is not None: processor.save_pretrained(A ) hf_model.save_pretrained(A ) if push_to_hub: processor.push_to_hub(f"Salesforce/{model_name}" ) hf_model.push_to_hub(f"Salesforce/{model_name}" ) if __name__ == "__main__": lowerCamelCase : List[Any] = argparse.ArgumentParser() lowerCamelCase : Dict = [ 'instructblip-vicuna-7b', 'instructblip-vicuna-13b', 'instructblip-flan-t5-xl', 'instructblip-flan-t5-xxl', ] parser.add_argument( '--model_name', default='instructblip-flan-t5-xl', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) lowerCamelCase : Union[str, Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
2
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" if not isinstance(A , A ): raise TypeError('''only integers accepted as input''' ) else: lowercase__ = str(abs(A ) ) lowercase__ = [list(A ) for char in range(len(A ) )] for index in range(len(A ) ): num_transpositions[index].pop(A ) return max( int(''''''.join(list(A ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
2
1
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _SCREAMING_SNAKE_CASE (A , A , A , A , A ) -> Union[str, Any]: """simple docstring""" lowercase__ = StableDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors lowercase__ = load_file(A ) lowercase__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: lowercase__ = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' ) lowercase__ = pipeline.text_encoder else: lowercase__ = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' ) lowercase__ = pipeline.unet # find the target layer lowercase__ = layer_infos.pop(0 ) while len(A ) > -1: try: lowercase__ = curr_layer.__getattr__(A ) if len(A ) > 0: lowercase__ = layer_infos.pop(0 ) elif len(A ) == 0: break except Exception: if len(A ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: lowercase__ = layer_infos.pop(0 ) lowercase__ = [] if "lora_down" in key: pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) ) pair_keys.append(A ) else: pair_keys.append(A ) pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: lowercase__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) lowercase__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(A , A ).unsqueeze(2 ).unsqueeze(3 ) else: lowercase__ = state_dict[pair_keys[0]].to(torch.floataa ) lowercase__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(A , A ) # update visited list for item in pair_keys: visited.append(A ) return pipeline if __name__ == "__main__": lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.7_5, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') lowerCamelCase : List[Any] = parser.parse_args() lowerCamelCase : Dict = args.base_model_path lowerCamelCase : str = args.checkpoint_path lowerCamelCase : Optional[int] = args.dump_path lowerCamelCase : str = args.lora_prefix_unet lowerCamelCase : Any = args.lora_prefix_text_encoder lowerCamelCase : List[Any] = args.alpha lowerCamelCase : Dict = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) lowerCamelCase : Optional[int] = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCamelCase : str = Mapping[str, np.ndarray] lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict. lowerCamelCase : Any = 0.0_1 @dataclasses.dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent lowerCAmelCase__ : Optional[Sequence[int]] = None def _SCREAMING_SNAKE_CASE (A ) -> Protein: """simple docstring""" lowercase__ = R'''(\[[A-Z]+\]\n)''' lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0] lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowercase__ = ["N", "CA", "C"] lowercase__ = None lowercase__ = None lowercase__ = None for g in groups: if "[PRIMARY]" == g[0]: lowercase__ = g[1][0].strip() for i in range(len(A ) ): if seq[i] not in residue_constants.restypes: lowercase__ = '''X''' # FIXME: strings are immutable lowercase__ = np.array( [residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowercase__ = [] for axis in range(3 ): tertiary.append(list(map(A , g[1][axis].split() ) ) ) lowercase__ = np.array(A ) lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowercase__ = np.zeros( ( len(A ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , ) def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) lowercase__ = prot.parents lowercase__ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowercase__ = [p for i, p in zip(A , A ) if i == chain_id] if parents is None or len(A ) == 0: lowercase__ = ['''N/A'''] pdb_headers.append(f"PARENT {' '.join(A )}" ) return pdb_headers def _SCREAMING_SNAKE_CASE (A , A ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = pdb_str.split('''\n''' ) lowercase__ = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) lowercase__ = 42 if prot.parents is not None and len(prot.parents ) > 0: lowercase__ = [] if prot.parents_chain_index is not None: lowercase__ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(A ) , [] ) parent_dict[str(A )].append(A ) lowercase__ = max([int(A ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] ) parents_per_chain.append(A ) else: parents_per_chain.append(list(prot.parents ) ) else: lowercase__ = [['''N/A''']] def make_parent_line(A ) -> str: return f"PARENT {' '.join(A )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowercase__ = 0 for i, l in enumerate(A ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(A ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(A ): lowercase__ = parents_per_chain[chain_counter] else: lowercase__ = ['''N/A'''] out_pdb_lines.append(make_parent_line(A ) ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = residue_constants.restypes + ['''X'''] def res_atoa(A ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowercase__ = residue_constants.atom_types lowercase__ = [] lowercase__ = prot.atom_mask lowercase__ = prot.aatype lowercase__ = prot.atom_positions lowercase__ = prot.residue_index.astype(np.intaa ) lowercase__ = prot.b_factors lowercase__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowercase__ = get_pdb_headers(A ) if len(A ) > 0: pdb_lines.extend(A ) lowercase__ = aatype.shape[0] lowercase__ = 1 lowercase__ = 0 lowercase__ = string.ascii_uppercase lowercase__ = None # Add all atom sites. for i in range(A ): lowercase__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowercase__ = '''ATOM''' lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}" lowercase__ = '''''' lowercase__ = '''''' lowercase__ = 1.00 lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works. lowercase__ = '''''' lowercase__ = '''A''' if chain_index is not None: lowercase__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowercase__ = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(A ) atom_index += 1 lowercase__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowercase__ = True lowercase__ = chain_index[i + 1] if should_terminate: # Close the chain. lowercase__ = '''TER''' lowercase__ = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(A ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(A , A ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
2
1
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Tuple = GPTSanJapaneseTokenizer lowerCAmelCase__ : List[str] = False lowerCAmelCase__ : Dict = {"""do_clean_text""": False, """add_prefix_space""": False} def UpperCamelCase__ (self : Any ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>'''] # fmt: on lowercase__ = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀 lowercase__ = {'''unk_token''': '''<unk>'''} lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) with open(self.emoji_file , '''w''' ) as emoji_writer: emoji_writer.write(json.dumps(UpperCamelCase ) ) def UpperCamelCase__ (self : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : Any , UpperCamelCase : Union[str, Any] ): '''simple docstring''' lowercase__ = '''こんにちは、世界。 \nこんばんは、㔺界。😀''' lowercase__ = '''こんにちは、世界。 \nこんばんは、世界。😀''' return input_text, output_text def UpperCamelCase__ (self : Tuple , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ ,lowercase__ = self.get_input_output_texts(UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowercase__ = tokenizer.decode(UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase ) return text, ids def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass # TODO add if relevant def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = self.get_tokenizer() # Testing tokenization lowercase__ = '''こんにちは、世界。 こんばんは、㔺界。''' lowercase__ = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。'''] lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # Testing conversion to ids without special tokens lowercase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # Testing conversion to ids with special tokens lowercase__ = tokens + [tokenizer.unk_token] lowercase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_tokenizer() # Testing tokenization lowercase__ = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。''' lowercase__ = '''こんにちは、、、、世界。こんばんは、、、、世界。''' lowercase__ = tokenizer.encode(UpperCamelCase ) lowercase__ = tokenizer.decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) @slow def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) # Testing tokenization lowercase__ = '''こんにちは、世界。''' lowercase__ = '''こんばんは、㔺界。😀''' lowercase__ = '''こんにちは、世界。こんばんは、世界。😀''' lowercase__ = tokenizer.encode(prefix_text + input_text ) lowercase__ = tokenizer.encode('''''' , prefix_text=prefix_text + input_text ) lowercase__ = tokenizer.encode(UpperCamelCase , prefix_text=UpperCamelCase ) lowercase__ = tokenizer.decode(UpperCamelCase ) lowercase__ = tokenizer.decode(UpperCamelCase ) lowercase__ = tokenizer.decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) @slow def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) # Testing tokenization lowercase__ = '''こんにちは、世界。''' lowercase__ = '''こんばんは、㔺界。😀''' lowercase__ = len(tokenizer.encode(UpperCamelCase ) ) - 2 lowercase__ = len(tokenizer.encode(UpperCamelCase ) ) - 2 lowercase__ = [1] + [0] * (len_prefix + len_text + 1) lowercase__ = [1] * (len_prefix + len_text + 1) + [0] lowercase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowercase__ = tokenizer(prefix_text + input_text ).token_type_ids lowercase__ = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids lowercase__ = tokenizer(UpperCamelCase , prefix_text=UpperCamelCase ).token_type_ids self.assertListEqual(UpperCamelCase , UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) lowercase__ = tokenizer.encode('''あンいワ''' ) lowercase__ = tokenizer.encode('''''' , prefix_text='''あンいワ''' ) lowercase__ = tokenizer.encode('''いワ''' , prefix_text='''あン''' ) self.assertEqual(tokenizer.decode(UpperCamelCase ) , tokenizer.decode(UpperCamelCase ) ) self.assertEqual(tokenizer.decode(UpperCamelCase ) , tokenizer.decode(UpperCamelCase ) ) self.assertNotEqual(UpperCamelCase , UpperCamelCase ) self.assertNotEqual(UpperCamelCase , UpperCamelCase ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' ) lowercase__ = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']] lowercase__ = tokenizer(UpperCamelCase , padding=UpperCamelCase ) lowercase__ = tokenizer.batch_encode_plus(UpperCamelCase , padding=UpperCamelCase ) # fmt: off lowercase__ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]] lowercase__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] lowercase__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , UpperCamelCase ) self.assertListEqual(x_token.token_type_ids , UpperCamelCase ) self.assertListEqual(x_token.attention_mask , UpperCamelCase ) self.assertListEqual(x_token_a.input_ids , UpperCamelCase ) self.assertListEqual(x_token_a.token_type_ids , UpperCamelCase ) self.assertListEqual(x_token_a.attention_mask , UpperCamelCase ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A ) -> list[list[int]]: """simple docstring""" lowercase__ = [] create_all_state(1 , A , A , [] , A ) return result def _SCREAMING_SNAKE_CASE (A , A , A , A , A , ) -> None: """simple docstring""" if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def _SCREAMING_SNAKE_CASE (A ) -> None: """simple docstring""" for i in total_list: print(*A ) if __name__ == "__main__": lowerCamelCase : Tuple = 4 lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = generate_all_combinations(n, k) print_all_state(total_list)
2
1
'''simple docstring''' import string def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = '''''' for i in sequence: lowercase__ = ord(A ) if 65 <= extract <= 90: output += chr(155 - extract ) elif 97 <= extract <= 122: output += chr(219 - extract ) else: output += i return output def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = string.ascii_letters lowercase__ = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1] return "".join( letters_reversed[letters.index(A )] if c in letters else c for c in sequence ) def _SCREAMING_SNAKE_CASE () -> None: """simple docstring""" from timeit import timeit print('''Running performance benchmarks...''' ) lowercase__ = '''from string import printable ; from __main__ import atbash, atbash_slow''' print(f"> atbash_slow(): {timeit('atbash_slow(printable)' , setup=A )} seconds" ) print(f"> atbash(): {timeit('atbash(printable)' , setup=A )} seconds" ) if __name__ == "__main__": for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"): print(f"""{example} encrypted in atbash: {atbash(example)}""") benchmark()
2
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
1
'''simple docstring''' class __lowerCAmelCase : '''simple docstring''' def __init__(self : str , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple ): '''simple docstring''' lowercase__ = name lowercase__ = value lowercase__ = weight def __repr__(self : Union[str, Any] ): '''simple docstring''' return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})" def UpperCamelCase__ (self : str ): '''simple docstring''' return self.value def UpperCamelCase__ (self : Any ): '''simple docstring''' return self.name def UpperCamelCase__ (self : Tuple ): '''simple docstring''' return self.weight def UpperCamelCase__ (self : Dict ): '''simple docstring''' return self.value / self.weight def _SCREAMING_SNAKE_CASE (A , A , A ) -> Dict: """simple docstring""" lowercase__ = [] for i in range(len(A ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def _SCREAMING_SNAKE_CASE (A , A , A ) -> List[Any]: """simple docstring""" lowercase__ = sorted(A , key=A , reverse=A ) lowercase__ = [] lowercase__ ,lowercase__ = 0.0, 0.0 for i in range(len(A ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def _SCREAMING_SNAKE_CASE () -> Dict: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
2
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCamelCase : str = parser.parse_args() if args.model_type == "bert": lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name) lowerCamelCase : Any = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCamelCase : int = model.state_dict() lowerCamelCase : int = {} for w in ["word_embeddings", "position_embeddings"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] lowerCamelCase : Tuple = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] lowerCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] lowerCamelCase : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] lowerCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] lowerCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] lowerCamelCase : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight'] lowerCamelCase : str = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""] lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
2
1
'''simple docstring''' from argparse import ArgumentParser from .env import EnvironmentCommand def _SCREAMING_SNAKE_CASE () -> Optional[Any]: """simple docstring""" lowercase__ = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' ) lowercase__ = parser.add_subparsers(help='''diffusers-cli command helpers''' ) # Register commands EnvironmentCommand.register_subcommand(A ) # Let's go lowercase__ = parser.parse_args() if not hasattr(A , '''func''' ): parser.print_help() exit(1 ) # Run lowercase__ = args.func(A ) service.run() if __name__ == "__main__": main()
2
'''simple docstring''' from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ): '''simple docstring''' lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
2
1
'''simple docstring''' import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase : str = 'python tqdm regex requests packaging filelock numpy tokenizers'.split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append('dataclasses') if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append('importlib_metadata') for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def _SCREAMING_SNAKE_CASE (A , A=None ) -> Optional[int]: """simple docstring""" require_version(deps[pkg] , A )
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Tuple = """cvt""" def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) lowercase__ = num_channels lowercase__ = patch_sizes lowercase__ = patch_stride lowercase__ = patch_padding lowercase__ = embed_dim lowercase__ = num_heads lowercase__ = depth lowercase__ = mlp_ratio lowercase__ = attention_drop_rate lowercase__ = drop_rate lowercase__ = drop_path_rate lowercase__ = qkv_bias lowercase__ = cls_token lowercase__ = qkv_projection_method lowercase__ = kernel_qkv lowercase__ = padding_kv lowercase__ = stride_kv lowercase__ = padding_q lowercase__ = stride_q lowercase__ = initializer_range lowercase__ = layer_norm_eps
2
1
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ShapEImgaImgPipeline lowerCAmelCase__ : List[str] = ["""image"""] lowerCAmelCase__ : Any = ["""image"""] lowerCAmelCase__ : Any = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowerCAmelCase__ : Tuple = False @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : str ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return 8 @property def UpperCamelCase__ (self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ = CLIPVisionModel(UpperCamelCase ) return model @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def UpperCamelCase__ (self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowercase__ = PriorTransformer(**UpperCamelCase ) return model @property def UpperCamelCase__ (self : int ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**UpperCamelCase ) return model def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.dummy_prior lowercase__ = self.dummy_image_encoder lowercase__ = self.dummy_image_processor lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , ) lowercase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = torch_device == '''cpu''' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) lowercase__ = pipe( UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
2
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowerCamelCase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) lowerCamelCase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowerCamelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowerCamelCase : str = np.expand_dims(test_image, axis=0) lowerCamelCase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowerCamelCase : Any = 'Normal' if result[0][0] == 1: lowerCamelCase : Any = 'Abnormality detected'
2
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : Optional[int] = MBartConfig lowerCAmelCase__ : Optional[int] = {} lowerCAmelCase__ : Optional[int] = """gelu""" def __init__(self : Tuple , UpperCamelCase : Any , UpperCamelCase : int=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=False , UpperCamelCase : Dict=99 , UpperCamelCase : Dict=32 , UpperCamelCase : Tuple=2 , UpperCamelCase : Any=4 , UpperCamelCase : Dict=37 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Dict=20 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : str=0 , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = eos_token_id lowercase__ = pad_token_id lowercase__ = bos_token_id def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ = prepare_mbart_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ): '''simple docstring''' lowercase__ = TFMBartModel(config=UpperCamelCase ).get_decoder() lowercase__ = inputs_dict['''input_ids'''] lowercase__ = input_ids[:1, :] lowercase__ = inputs_dict['''attention_mask'''][:1, :] lowercase__ = inputs_dict['''head_mask'''] lowercase__ = 1 # first forward pass lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ ,lowercase__ = outputs.to_tuple() lowercase__ = past_key_values[1] def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None , A=None , A=None , A=None , ) -> List[Any]: """simple docstring""" if attention_mask is None: lowercase__ = tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase__ : Optional[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ : Optional[int] = ( { """conversational""": TFMBartForConditionalGeneration, """feature-extraction""": TFMBartModel, """summarization""": TFMBartForConditionalGeneration, """text2text-generation""": TFMBartForConditionalGeneration, """translation""": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ : Any = True lowerCAmelCase__ : str = False lowerCAmelCase__ : str = False def UpperCamelCase__ (self : Any , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = TFMBartModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase ) @require_sentencepiece @require_tokenizers @require_tf class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = [ """ UN Chief Says There Is No Military Solution in Syria""", ] lowerCAmelCase__ : str = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", ] lowerCAmelCase__ : str = """facebook/mbart-large-en-ro""" @cached_property def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def UpperCamelCase__ (self : Union[str, Any] , **UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = self.translate_src_text(**UpperCamelCase ) self.assertListEqual(self.expected_text , UpperCamelCase ) def UpperCamelCase__ (self : Tuple , **UpperCamelCase : Dict ): '''simple docstring''' lowercase__ = self.tokenizer(self.src_text , **UpperCamelCase , return_tensors='''tf''' ) lowercase__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) lowercase__ = self.tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) return generated_words @slow def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' self._assert_generated_batch_equal_expected()
2
'''simple docstring''' class __lowerCAmelCase : # Public class to implement a graph '''simple docstring''' def __init__(self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = row lowercase__ = col lowercase__ = graph def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): # And finally, count all islands. '''simple docstring''' lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += 1 return count
2
1
'''simple docstring''' import datasets lowerCamelCase : str = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' lowerCamelCase : Any = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' lowerCamelCase : List[str] = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ (self : List[str] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ), } ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , ) def UpperCamelCase__ (self : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ): '''simple docstring''' return {"accuracy": simple_accuracy(UpperCamelCase , UpperCamelCase )}
2
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' import logging import math from functools import partial from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union import torch from .tensor_utils import tensor_tree_map, tree_map def _SCREAMING_SNAKE_CASE (A ) -> List[Tuple[int, ...]]: """simple docstring""" lowercase__ = [] if isinstance(A , A ): for v in tree.values(): shapes.extend(_fetch_dims(A ) ) elif isinstance(A , (list, tuple) ): for t in tree: shapes.extend(_fetch_dims(A ) ) elif isinstance(A , torch.Tensor ): shapes.append(tree.shape ) else: raise ValueError('''Not supported''' ) return shapes @torch.jit.ignore def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple[int, ...]: """simple docstring""" lowercase__ = [] for d in reversed(A ): idx.append(flat_idx % d ) lowercase__ = flat_idx // d return tuple(reversed(A ) ) @torch.jit.ignore def _SCREAMING_SNAKE_CASE (A , A , A , A = None , A = None , ) -> List[Tuple[slice, ...]]: """simple docstring""" def reduce_edge_list(A ) -> None: lowercase__ = True for i in range(len(A ) ): lowercase__ = -1 * (i + 1) l[reversed_idx] &= tally lowercase__ = l[reversed_idx] if start_edges is None: lowercase__ = [s == 0 for s in start] reduce_edge_list(A ) if end_edges is None: lowercase__ = [e == (d - 1) for e, d in zip(A , A )] reduce_edge_list(A ) # Base cases. Either start/end are empty and we're done, or the final, # one-dimensional tensor can be simply sliced if len(A ) == 0: return [()] elif len(A ) == 1: return [(slice(start[0] , end[0] + 1 ),)] lowercase__ = [] lowercase__ = [] # Dimensions common to start and end can be selected directly for s, e in zip(A , A ): if s == e: path_list.append(slice(A , s + 1 ) ) else: break lowercase__ = tuple(A ) lowercase__ = len(A ) # start == end, and we're done if divergence_idx == len(A ): return [path] def upper() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None lowercase__ = start[divergence_idx] return tuple( path + (slice(A , sdi + 1 ),) + s for s in _get_minimal_slice_set( start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) ) def lower() -> Tuple[Tuple[slice, ...], ...]: assert start_edges is not None assert end_edges is not None lowercase__ = end[divergence_idx] return tuple( path + (slice(A , edi + 1 ),) + s for s in _get_minimal_slice_set( [0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) ) # If both start and end are at the edges of the subtree rooted at # divergence_idx, we can just select the whole subtree at once if start_edges[divergence_idx] and end_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) ) # If just start is at the edge, we can grab almost all of the subtree, # treating only the ragged bottom edge as an edge case elif start_edges[divergence_idx]: slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) ) slices.extend(lower() ) # Analogous to the previous case, but the top is ragged this time elif end_edges[divergence_idx]: slices.extend(upper() ) slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) ) # If both sides of the range are ragged, we need to handle both sides # separately. If there's contiguous meat in between them, we can index it # in one big chunk else: slices.extend(upper() ) lowercase__ = end[divergence_idx] - start[divergence_idx] if middle_ground > 1: slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) ) slices.extend(lower() ) return slices @torch.jit.ignore def _SCREAMING_SNAKE_CASE (A , A , A , A ) -> torch.Tensor: """simple docstring""" lowercase__ = t.shape[:no_batch_dims] lowercase__ = list(_flat_idx_to_idx(A , A ) ) # _get_minimal_slice_set is inclusive lowercase__ = list(_flat_idx_to_idx(flat_end - 1 , A ) ) # Get an ordered list of slices to perform lowercase__ = _get_minimal_slice_set( A , A , A , ) lowercase__ = [t[s] for s in slices] return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] ) def _SCREAMING_SNAKE_CASE (A , A , A , A , A = False , A = None , A = False , ) -> Any: """simple docstring""" if not (len(A ) > 0): raise ValueError('''Must provide at least one input''' ) lowercase__ = [shape[:no_batch_dims] for shape in _fetch_dims(A )] lowercase__ = tuple([max(A ) for s in zip(*A )] ) def _prep_inputs(A ) -> torch.Tensor: if not low_mem: if not sum(t.shape[:no_batch_dims] ) == no_batch_dims: lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) lowercase__ = t.reshape(-1 , *t.shape[no_batch_dims:] ) else: lowercase__ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] ) return t lowercase__ = tensor_tree_map(_prep_inputs , A ) lowercase__ = None if _out is not None: lowercase__ = tensor_tree_map(lambda A : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out ) lowercase__ = 1 for d in orig_batch_dims: flat_batch_dim *= d lowercase__ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0) def _select_chunk(A ) -> torch.Tensor: return t[i : i + chunk_size] if t.shape[0] != 1 else t lowercase__ = 0 lowercase__ = prepped_outputs for _ in range(A ): # Chunk the input if not low_mem: lowercase__ = _select_chunk else: lowercase__ = partial( _chunk_slice , flat_start=A , flat_end=min(A , i + chunk_size ) , no_batch_dims=len(A ) , ) lowercase__ = tensor_tree_map(A , A ) # Run the layer on the chunk lowercase__ = layer(**A ) # Allocate space for the output if out is None: lowercase__ = tensor_tree_map(lambda A : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , A ) # Put the chunk in its pre-allocated space if isinstance(A , A ): def assign(A , A ) -> None: for k, v in da.items(): if isinstance(A , A ): assign(A , da[k] ) else: if _add_into_out: v[i : i + chunk_size] += da[k] else: lowercase__ = da[k] assign(A , A ) elif isinstance(A , A ): for xa, xa in zip(A , A ): if _add_into_out: xa[i : i + chunk_size] += xa else: lowercase__ = xa elif isinstance(A , torch.Tensor ): if _add_into_out: out[i : i + chunk_size] += output_chunk else: lowercase__ = output_chunk else: raise ValueError('''Not supported''' ) i += chunk_size lowercase__ = tensor_tree_map(lambda A : t.view(orig_batch_dims + t.shape[1:] ) , A ) return out class __lowerCAmelCase : '''simple docstring''' def __init__(self : Tuple , UpperCamelCase : int = 512 , ): '''simple docstring''' lowercase__ = max_chunk_size lowercase__ = None lowercase__ = None def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : Callable , UpperCamelCase : tuple , UpperCamelCase : int ): '''simple docstring''' logging.info('''Tuning chunk size...''' ) if min_chunk_size >= self.max_chunk_size: return min_chunk_size lowercase__ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )] lowercase__ = [c for c in candidates if c > min_chunk_size] lowercase__ = [min_chunk_size] + candidates candidates[-1] += 4 def test_chunk_size(UpperCamelCase : int ) -> bool: try: with torch.no_grad(): fn(*UpperCamelCase , chunk_size=UpperCamelCase ) return True except RuntimeError: return False lowercase__ = 0 lowercase__ = len(UpperCamelCase ) - 1 while i > min_viable_chunk_size_index: lowercase__ = test_chunk_size(candidates[i] ) if not viable: lowercase__ = (min_viable_chunk_size_index + i) // 2 else: lowercase__ = i lowercase__ = (i + len(UpperCamelCase ) - 1) // 2 return candidates[min_viable_chunk_size_index] def UpperCamelCase__ (self : int , UpperCamelCase : Iterable , UpperCamelCase : Iterable ): '''simple docstring''' lowercase__ = True for aa, aa in zip(UpperCamelCase , UpperCamelCase ): assert type(UpperCamelCase ) == type(UpperCamelCase ) if isinstance(UpperCamelCase , (list, tuple) ): consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase ) elif isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )] lowercase__ = [v for _, v in sorted(aa.items() , key=lambda UpperCamelCase : x[0] )] consistent &= self._compare_arg_caches(UpperCamelCase , UpperCamelCase ) else: consistent &= aa == aa return consistent def UpperCamelCase__ (self : int , UpperCamelCase : Callable , UpperCamelCase : tuple , UpperCamelCase : int , ): '''simple docstring''' lowercase__ = True lowercase__ = tree_map(lambda UpperCamelCase : a.shape if isinstance(UpperCamelCase , torch.Tensor ) else a , UpperCamelCase , UpperCamelCase ) if self.cached_arg_data is not None: # If args have changed shape/value, we need to re-tune assert len(self.cached_arg_data ) == len(UpperCamelCase ) lowercase__ = self._compare_arg_caches(self.cached_arg_data , UpperCamelCase ) else: # Otherwise, we can reuse the precomputed value lowercase__ = False if not consistent: lowercase__ = self._determine_favorable_chunk_size( UpperCamelCase , UpperCamelCase , UpperCamelCase , ) lowercase__ = arg_data assert self.cached_chunk_size is not None return self.cached_chunk_size
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" return len(set(A ) ) == len(A ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
'''simple docstring''' import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-bert' lowerCamelCase : Optional[int] = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert') lowerCamelCase : Optional[Any] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = cached_file(UpperCamelCase , UpperCamelCase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCamelCase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase , UpperCamelCase ) ) ) with open(os.path.join(UpperCamelCase , '''refs''' , '''main''' ) ) as f: lowercase__ = f.read() self.assertEqual(UpperCamelCase , os.path.join(UpperCamelCase , '''snapshots''' , UpperCamelCase , UpperCamelCase ) ) self.assertTrue(os.path.isfile(UpperCamelCase ) ) # File is cached at the same place the second time. lowercase__ = cached_file(UpperCamelCase , UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase ) # Using a specific revision to test the full commit hash. lowercase__ = cached_file(UpperCamelCase , UpperCamelCase , revision='''9b8c223''' ) self.assertEqual(UpperCamelCase , os.path.join(UpperCamelCase , '''snapshots''' , UpperCamelCase , UpperCamelCase ) ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' with self.assertRaisesRegex(UpperCamelCase , '''is not a valid model identifier''' ): lowercase__ = cached_file('''tiny-random-bert''' , UpperCamelCase ) with self.assertRaisesRegex(UpperCamelCase , '''is not a valid git identifier''' ): lowercase__ = cached_file(UpperCamelCase , UpperCamelCase , revision='''aaaa''' ) with self.assertRaisesRegex(UpperCamelCase , '''does not appear to have a file named''' ): lowercase__ = cached_file(UpperCamelCase , '''conf''' ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' with self.assertRaisesRegex(UpperCamelCase , '''does not appear to have a file named''' ): lowercase__ = cached_file(UpperCamelCase , '''conf''' ) with open(os.path.join(UpperCamelCase , '''refs''' , '''main''' ) ) as f: lowercase__ = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase , '''.no_exist''' , UpperCamelCase , '''conf''' ) ) ) lowercase__ = cached_file(UpperCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=UpperCamelCase ) self.assertIsNone(UpperCamelCase ) lowercase__ = cached_file(UpperCamelCase , '''conf''' , local_files_only=UpperCamelCase , _raise_exceptions_for_missing_entries=UpperCamelCase ) self.assertIsNone(UpperCamelCase ) lowercase__ = mock.Mock() lowercase__ = 500 lowercase__ = {} lowercase__ = HTTPError lowercase__ = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch('''requests.Session.request''' , return_value=UpperCamelCase ) as mock_head: lowercase__ = cached_file(UpperCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=UpperCamelCase ) self.assertIsNone(UpperCamelCase ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase__ (self : Dict ): '''simple docstring''' self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase ) ) self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , UpperCamelCase ) ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCamelCase , '''is not a valid model identifier''' ): get_file_from_repo('''bert-base-case''' , UpperCamelCase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCamelCase , '''is not a valid git identifier''' ): get_file_from_repo('''bert-base-cased''' , UpperCamelCase , revision='''ahaha''' ) lowercase__ = get_file_from_repo('''bert-base-cased''' , UpperCamelCase ) # The name is the cached name which is not very easy to test, so instead we load the content. lowercase__ = json.loads(open(UpperCamelCase , '''r''' ).read() ) self.assertEqual(config['''hidden_size'''] , 768 ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ = Path(UpperCamelCase ) / '''a.txt''' filename.touch() self.assertEqual(get_file_from_repo(UpperCamelCase , '''a.txt''' ) , str(UpperCamelCase ) ) self.assertIsNone(get_file_from_repo(UpperCamelCase , '''b.txt''' ) )
2
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase : Any = None lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : List[str] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase : Any = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""] lowerCAmelCase__ : Optional[int] = TaTokenizer lowerCAmelCase__ : List[int] = [] def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True lowercase__ = extra_ids @staticmethod def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , ) return max_model_length def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowercase__ = os.path.join( UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) logger.info(f"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return list( set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
2
1
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : '''simple docstring''' def __init__(self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str]=13 , UpperCamelCase : Optional[Any]=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[str]=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=True , UpperCamelCase : Dict=99 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Dict=5 , UpperCamelCase : List[Any]=4 , UpperCamelCase : Optional[int]=37 , UpperCamelCase : str="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : Optional[Any]=512 , UpperCamelCase : Optional[Any]=16 , UpperCamelCase : str=2 , UpperCamelCase : Any=0.02 , UpperCamelCase : Tuple=3 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : Union[str, Any]=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ (self : str ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[str] , UpperCamelCase : int ): '''simple docstring''' lowercase__ = NystromformerModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowercase__ = model(UpperCamelCase , token_type_ids=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ (self : Tuple , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' lowercase__ = NystromformerForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] ): '''simple docstring''' lowercase__ = NystromformerForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ (self : str , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = NystromformerForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : List[Any] ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = NystromformerForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ): '''simple docstring''' lowercase__ = self.num_choices lowercase__ = NystromformerForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) , ) = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Any = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ : Dict = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : Dict = False def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = NystromformerModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ = type self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) @slow def UpperCamelCase__ (self : List[str] ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = NystromformerModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @require_torch class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) lowercase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): lowercase__ = model(UpperCamelCase )[0] lowercase__ = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase ) lowercase__ = torch.tensor( [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1E-4 ) ) @slow def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = '''the [MASK] of Belgium is Brussels''' lowercase__ = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) lowercase__ = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) lowercase__ = tokenizer(UpperCamelCase , return_tensors='''pt''' ) with torch.no_grad(): lowercase__ = model(encoding.input_ids ).logits lowercase__ = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(UpperCamelCase ) , '''capital''' )
2
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ShapEImgaImgPipeline lowerCAmelCase__ : List[str] = ["""image"""] lowerCAmelCase__ : Any = ["""image"""] lowerCAmelCase__ : Any = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowerCAmelCase__ : Tuple = False @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : str ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return 8 @property def UpperCamelCase__ (self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ = CLIPVisionModel(UpperCamelCase ) return model @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def UpperCamelCase__ (self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowercase__ = PriorTransformer(**UpperCamelCase ) return model @property def UpperCamelCase__ (self : int ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**UpperCamelCase ) return model def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.dummy_prior lowercase__ = self.dummy_image_encoder lowercase__ = self.dummy_image_processor lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , ) lowercase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = torch_device == '''cpu''' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) lowercase__ = pipe( UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" lowercase__ = [[0 for _ in range(A )] for _ in range(m + 1 )] for i in range(m + 1 ): lowercase__ = 1 for n in range(m + 1 ): for k in range(1 , A ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: lowerCamelCase : Any = int(input('Enter a number: ').strip()) print(partition(n)) except ValueError: print('Please enter a number.') else: try: lowerCamelCase : List[Any] = int(sys.argv[1]) print(partition(n)) except ValueError: print('Please pass a number.')
2
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : str = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
1
'''simple docstring''' from datetime import datetime as dt import os from github import Github lowerCamelCase : int = [ 'good first issue', 'good second issue', 'good difficult issue', 'feature request', 'new model', 'wip', ] def _SCREAMING_SNAKE_CASE () -> str: """simple docstring""" lowercase__ = Github(os.environ['''GITHUB_TOKEN'''] ) lowercase__ = g.get_repo('''huggingface/transformers''' ) lowercase__ = repo.get_issues(state='''open''' ) for issue in open_issues: lowercase__ = sorted([comment for comment in issue.get_comments()] , key=lambda A : i.created_at , reverse=A ) lowercase__ = comments[0] if len(A ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='''closed''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) if __name__ == "__main__": main()
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : List[Any] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = """realm""" def __init__(self : str , UpperCamelCase : List[Any]=30522 , UpperCamelCase : List[Any]=768 , UpperCamelCase : int=128 , UpperCamelCase : Any=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Union[str, Any]=3072 , UpperCamelCase : List[str]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=512 , UpperCamelCase : Dict=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[Any]=1E-12 , UpperCamelCase : Dict=256 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : Optional[int]=1E-3 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[int]=320 , UpperCamelCase : List[str]=13353718 , UpperCamelCase : Optional[Any]=5000 , UpperCamelCase : str=1 , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : List[Any]=2 , **UpperCamelCase : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) # Common config lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = retriever_proj_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = num_candidates lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = type_vocab_size lowercase__ = layer_norm_eps # Reader config lowercase__ = span_hidden_size lowercase__ = max_span_width lowercase__ = reader_layer_norm_eps lowercase__ = reader_beam_size lowercase__ = reader_seq_len # Retrieval config lowercase__ = num_block_records lowercase__ = searcher_beam_size
2
1
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __lowerCAmelCase : '''simple docstring''' def __init__(self : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str]=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[str]=99 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Optional[int]=36 , UpperCamelCase : Union[str, Any]=6 , UpperCamelCase : Dict=6 , UpperCamelCase : Dict=6 , UpperCamelCase : Any=37 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : Dict=16 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : Any=3 , UpperCamelCase : Dict=4 , UpperCamelCase : Tuple=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = embedding_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_hidden_groups lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = AlbertModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) lowercase__ = model(UpperCamelCase , token_type_ids=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ): '''simple docstring''' lowercase__ = AlbertForPreTraining(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , sentence_order_label=UpperCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def UpperCamelCase__ (self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Tuple ): '''simple docstring''' lowercase__ = AlbertForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ (self : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Tuple ): '''simple docstring''' lowercase__ = AlbertForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__ (self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Dict ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = AlbertForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__ (self : str , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' lowercase__ = self.num_labels lowercase__ = AlbertForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' lowercase__ = self.num_choices lowercase__ = AlbertForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) ,( lowercase__ ) , ) = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ : Dict = ( { """feature-extraction""": AlbertModel, """fill-mask""": AlbertForMaskedLM, """question-answering""": AlbertForQuestionAnswering, """text-classification""": AlbertForSequenceClassification, """token-classification""": AlbertForTokenClassification, """zero-shot""": AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ : int = True def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : Any=False ): '''simple docstring''' lowercase__ = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class in get_values(UpperCamelCase ): lowercase__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase ) lowercase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) return inputs_dict def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = AlbertModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase__ = type self.model_tester.create_and_check_model(*UpperCamelCase ) @slow def UpperCamelCase__ (self : Tuple ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = AlbertModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @require_torch class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = AlbertModel.from_pretrained('''albert-base-v2''' ) lowercase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowercase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] lowercase__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCamelCase ) lowercase__ = torch.tensor( [[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1E-4 ) )
2
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : int = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = """mvp""" lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""] lowerCAmelCase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self : Any , UpperCamelCase : Optional[int]=50267 , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=12 , UpperCamelCase : Tuple=4096 , UpperCamelCase : Dict=16 , UpperCamelCase : int=12 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Union[str, Any]=1024 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=1 , UpperCamelCase : int=0 , UpperCamelCase : int=2 , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=False , UpperCamelCase : int=100 , UpperCamelCase : Optional[Any]=800 , **UpperCamelCase : str , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = classifier_dropout lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = use_prompt lowercase__ = prompt_length lowercase__ = prompt_mid_dim super().__init__( pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ): lowercase__ = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " '''The config can simply be saved and uploaded again to be fixed.''' )
2
1
'''simple docstring''' import unittest from dataclasses import dataclass import pytest from accelerate.commands.config.config_args import SageMakerConfig from accelerate.utils import ComputeEnvironment from accelerate.utils.launch import _convert_nargs_to_dict @dataclass class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = ComputeEnvironment.AMAZON_SAGEMAKER lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : str = """ml.p3.2xlarge""" lowerCAmelCase__ : Optional[Any] = """accelerate_sagemaker_execution_role""" lowerCAmelCase__ : Optional[int] = """hf-sm""" lowerCAmelCase__ : List[Any] = """us-east-1""" lowerCAmelCase__ : Tuple = 1 lowerCAmelCase__ : List[str] = """accelerate-sagemaker-1""" lowerCAmelCase__ : Any = """1.6""" lowerCAmelCase__ : Optional[Any] = """4.4""" lowerCAmelCase__ : Union[str, Any] = """train.py""" lowerCAmelCase__ : str = [ """--model_name_or_path""", """bert""", """--do_train""", """False""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] lowerCAmelCase__ : Tuple = [ """--model_name_or_path""", """bert""", """--do_train""", """--do_test""", """False""", """--do_predict""", """--epochs""", """3""", """--learning_rate""", """5e-5""", """--max_steps""", """50.5""", ] class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args ) assert isinstance(converted_args['''model_name_or_path'''] , UpperCamelCase ) assert isinstance(converted_args['''do_train'''] , UpperCamelCase ) assert isinstance(converted_args['''epochs'''] , UpperCamelCase ) assert isinstance(converted_args['''learning_rate'''] , UpperCamelCase ) assert isinstance(converted_args['''max_steps'''] , UpperCamelCase ) with pytest.raises(UpperCamelCase ): _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
2
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = DebertaVaTokenizer lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast lowerCAmelCase__ : str = True lowerCAmelCase__ : Tuple = True def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = '''this is a test''' lowercase__ = '''this is a test''' return input_text, output_text def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''<pad>''' lowercase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(UpperCamelCase ) , 30001 ) def UpperCamelCase__ (self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''This is a test''' lowercase__ = [13, 1, 4398, 25, 21, 1289] lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # fmt: off lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DebertaVaTokenizer(UpperCamelCase ) lowercase__ = tokenizer.encode('''sequence builders''' ) lowercase__ = tokenizer.encode('''multi-sequence build''' ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : List[str] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } lowerCamelCase : Dict = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def _SCREAMING_SNAKE_CASE (A , A , A , A , A ) -> List[str]: """simple docstring""" for attribute in key.split('''.''' ): lowercase__ = getattr(A , A ) if weight_type is not None: lowercase__ = getattr(A , A ).shape else: lowercase__ = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": lowercase__ = value elif weight_type == "weight_g": lowercase__ = value elif weight_type == "weight_v": lowercase__ = value elif weight_type == "bias": lowercase__ = value else: lowercase__ = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" lowercase__ = [] lowercase__ = fairseq_model.state_dict() lowercase__ = hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase__ = False if "conv_layers" in name: load_conv_layer( A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , ) lowercase__ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: lowercase__ = True if "*" in mapped_key: lowercase__ = name.split(A )[0].split('''.''' )[-2] lowercase__ = mapped_key.replace('''*''' , A ) if "weight_g" in name: lowercase__ = '''weight_g''' elif "weight_v" in name: lowercase__ = '''weight_v''' elif "bias" in name and "relative_attention_bias" not in name: lowercase__ = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ = '''weight''' else: lowercase__ = None set_recursively(A , A , A , A , A ) continue if not is_used: unused_weights.append(A ) logger.warning(f"Unused weights: {unused_weights}" ) def _SCREAMING_SNAKE_CASE (A , A , A , A , A ) -> List[str]: """simple docstring""" lowercase__ = full_name.split('''conv_layers.''' )[-1] lowercase__ = name.split('''.''' ) lowercase__ = int(items[0] ) lowercase__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) lowercase__ = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) lowercase__ = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) lowercase__ = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) lowercase__ = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(A ) @torch.no_grad() def _SCREAMING_SNAKE_CASE (A , A , A=None ) -> Union[str, Any]: """simple docstring""" lowercase__ = torch.load(A ) lowercase__ = WavLMConfigOrig(checkpoint['''cfg'''] ) lowercase__ = WavLMOrig(A ) model.load_state_dict(checkpoint['''model'''] ) model.eval() if config_path is not None: lowercase__ = WavLMConfig.from_pretrained(A ) else: lowercase__ = WavLMConfig() lowercase__ = WavLMModel(A ) recursively_load_weights(A , A ) hf_wavlm.save_pretrained(A ) if __name__ == "__main__": lowerCamelCase : Any = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') lowerCamelCase : int = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
2
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]: """simple docstring""" lowercase__ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE (A ) -> List[str]: """simple docstring""" lowercase__ ,lowercase__ = emb.weight.shape lowercase__ = nn.Linear(A , A , bias=A ) lowercase__ = emb.weight.data return lin_layer def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]: """simple docstring""" lowercase__ = torch.load(A , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A ) lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A ) if mbart_aa and finetuned: lowercase__ = '''relu''' lowercase__ = state_dict['''decoder.embed_tokens.weight'''] lowercase__ = MBartForConditionalGeneration(A ) model.model.load_state_dict(A ) if finetuned: lowercase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') lowerCamelCase : Any = parser.parse_args() lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
2
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : Tuple = { 'vocab_file': { 'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt', 'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt', 'junnyu/roformer_chinese_char_small': ( 'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt' ), 'junnyu/roformer_chinese_char_base': ( 'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt' ), 'junnyu/roformer_small_discriminator': ( 'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt' ), 'junnyu/roformer_small_generator': ( 'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt' ), } } lowerCamelCase : int = { 'junnyu/roformer_chinese_small': 1_536, 'junnyu/roformer_chinese_base': 1_536, 'junnyu/roformer_chinese_char_small': 512, 'junnyu/roformer_chinese_char_base': 512, 'junnyu/roformer_small_discriminator': 128, 'junnyu/roformer_small_generator': 128, } lowerCamelCase : Union[str, Any] = { 'junnyu/roformer_chinese_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_base': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_small': {'do_lower_case': True}, 'junnyu/roformer_chinese_char_base': {'do_lower_case': True}, 'junnyu/roformer_small_discriminator': {'do_lower_case': True}, 'junnyu/roformer_small_generator': {'do_lower_case': True}, } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = VOCAB_FILES_NAMES lowerCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : Tuple = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ : List[str] = RoFormerTokenizer def __init__(self : int , UpperCamelCase : Dict=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Tuple="[UNK]" , UpperCamelCase : Union[str, Any]="[SEP]" , UpperCamelCase : int="[PAD]" , UpperCamelCase : List[Any]="[CLS]" , UpperCamelCase : Optional[int]="[MASK]" , UpperCamelCase : List[Any]=True , UpperCamelCase : Dict=None , **UpperCamelCase : List[Any] , ): '''simple docstring''' super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , tokenize_chinese_chars=UpperCamelCase , strip_accents=UpperCamelCase , **UpperCamelCase , ) lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get('''lowercase''' , UpperCamelCase ) != do_lower_case or pre_tok_state.get('''strip_accents''' , UpperCamelCase ) != strip_accents ): lowercase__ = getattr(UpperCamelCase , pre_tok_state.pop('''type''' ) ) lowercase__ = do_lower_case lowercase__ = strip_accents lowercase__ = pre_tok_class(**UpperCamelCase ) lowercase__ = do_lower_case def __getstate__(self : Optional[int] ): '''simple docstring''' lowercase__ = self.__dict__.copy() lowercase__ = BertPreTokenizer() return state def __setstate__(self : int , UpperCamelCase : Tuple ): '''simple docstring''' lowercase__ = d lowercase__ = self.__dict__['''_tokenizer'''].get_vocab() lowercase__ = PreTokenizer.custom(JiebaPreTokenizer(UpperCamelCase ) ) def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str]=None ): '''simple docstring''' lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase__ (self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCamelCase__ (self : Dict , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' lowercase__ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase ) def UpperCamelCase__ (self : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Union[str, Any]=False , **UpperCamelCase : Any , ): '''simple docstring''' lowercase__ = BertPreTokenizer() return super().save_pretrained(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase )
2
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCamelCase : List[Any] = logging.getLogger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ): '''simple docstring''' lowercase__ = label_idx def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: lowercase__ = [] lowercase__ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 lowercase__ = [] lowercase__ = [] else: lowercase__ = line.split(''' ''' ) words.append(splits[0] ) if len(UpperCamelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) return examples def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(UpperCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(UpperCamelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] ): '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(UpperCamelCase ): lowercase__ = [] lowercase__ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(UpperCamelCase ) == len(UpperCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 return examples def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for sentence in parse_incr(UpperCamelCase ): lowercase__ = preds_list[example_id] lowercase__ = '''''' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(UpperCamelCase ) example_id += 1 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
2
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = { 'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json', # See all M-CTC-T models at https://huggingface.co/models?filter=mctct } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = """mctct""" def __init__(self : Any , UpperCamelCase : str=8065 , UpperCamelCase : List[str]=1536 , UpperCamelCase : List[Any]=36 , UpperCamelCase : List[Any]=6144 , UpperCamelCase : str=4 , UpperCamelCase : str=384 , UpperCamelCase : List[Any]=920 , UpperCamelCase : Any=1E-5 , UpperCamelCase : str=0.3 , UpperCamelCase : List[Any]="relu" , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Tuple=0.3 , UpperCamelCase : Tuple=0.3 , UpperCamelCase : Any=1 , UpperCamelCase : Optional[int]=0 , UpperCamelCase : Tuple=2 , UpperCamelCase : int=1 , UpperCamelCase : int=0.3 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Dict=(7,) , UpperCamelCase : Optional[Any]=(3,) , UpperCamelCase : Union[str, Any]=80 , UpperCamelCase : int=1 , UpperCamelCase : Dict=None , UpperCamelCase : Any="sum" , UpperCamelCase : List[str]=False , **UpperCamelCase : List[str] , ): '''simple docstring''' super().__init__(**UpperCamelCase , pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = intermediate_size lowercase__ = num_attention_heads lowercase__ = attention_head_dim lowercase__ = max_position_embeddings lowercase__ = layer_norm_eps lowercase__ = layerdrop lowercase__ = hidden_act lowercase__ = initializer_range lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = pad_token_id lowercase__ = bos_token_id lowercase__ = eos_token_id lowercase__ = conv_glu_dim lowercase__ = conv_dropout lowercase__ = num_conv_layers lowercase__ = input_feat_per_channel lowercase__ = input_channels lowercase__ = conv_channels lowercase__ = ctc_loss_reduction lowercase__ = ctc_zero_infinity # prevents config testing fail with exporting to json lowercase__ = list(UpperCamelCase ) lowercase__ = list(UpperCamelCase ) if len(self.conv_kernel ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` ''' f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, " f"`config.num_conv_layers = {self.num_conv_layers}`." )
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = """megatron-bert""" def __init__(self : Tuple , UpperCamelCase : Optional[int]=29056 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=24 , UpperCamelCase : int=16 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Optional[int]="absolute" , UpperCamelCase : List[Any]=True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache
2
1
'''simple docstring''' import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py lowerCamelCase : Union[str, Any] = '.' if __name__ == "__main__": lowerCamelCase : Dict = os.path.join(REPO_PATH, 'utils/documentation_tests.txt') lowerCamelCase : Union[str, Any] = [] lowerCamelCase : Optional[Any] = [] with open(doctest_file_path) as fp: for line in fp: lowerCamelCase : Union[str, Any] = line.strip() lowerCamelCase : str = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: lowerCamelCase : List[Any] = '\n'.join(non_existent_paths) raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
2
'''simple docstring''' # Lint as: python3 import itertools import os import re lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])') lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])') lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)') lowerCamelCase : List[Any] = re.compile(R'(_{2,})') lowerCamelCase : str = R'^\w+(\.\w+)*$' lowerCamelCase : Dict = R'<>:/\|?*' def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A ) lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A ) return name.lower() def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" lowercase__ = _single_underscore_re.split(A ) lowercase__ = [_multiple_underscores_re.split(A ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' ) def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , A ): raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." ) return f"{filename_prefix_for_name(A )}-{split}" def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) if filetype_suffix: prefix += f".{filetype_suffix}" lowercase__ = os.path.join(A , A ) return f"{filepath}*" def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) lowercase__ = os.path.join(A , A ) if shard_lengths: lowercase__ = len(A ) lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )] if filetype_suffix: lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames] return filenames else: lowercase__ = prefix if filetype_suffix: filename += f".{filetype_suffix}" return [filename]
2
1
'''simple docstring''' lowerCamelCase : List[Any] = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
2
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __lowerCAmelCase : '''simple docstring''' def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = decoder_seq_length # For common tests lowercase__ = self.decoder_seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_model lowercase__ = decoder_layers lowercase__ = decoder_layers lowercase__ = decoder_ffn_dim lowercase__ = decoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = eos_token_id lowercase__ = bos_token_id lowercase__ = pad_token_id lowercase__ = decoder_start_token_id lowercase__ = use_cache lowercase__ = max_position_embeddings lowercase__ = None lowercase__ = decoder_seq_length lowercase__ = 2 lowercase__ = 1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ): '''simple docstring''' lowercase__ = True lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval() lowercase__ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 ) lowercase__ = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ = model(UpperCamelCase )['''last_hidden_state'''] lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state'''] # select random slice lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowercase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else () lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} lowerCAmelCase__ : Optional[Any] = True lowerCAmelCase__ : List[str] = False def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass
2
1
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) lowercase__ = AutoTokenizer.from_pretrained('''google/mt5-small''' ) lowercase__ = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids lowercase__ = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids lowercase__ = shift_tokens_right(UpperCamelCase , model.config.pad_token_id , model.config.decoder_start_token_id ) lowercase__ = model(UpperCamelCase , decoder_input_ids=UpperCamelCase ).logits lowercase__ = optax.softmax_cross_entropy(UpperCamelCase , onehot(UpperCamelCase , logits.shape[-1] ) ).mean() lowercase__ = -(labels.shape[-1] * loss.item()) lowercase__ = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
2
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" if not isinstance(A , A ): raise TypeError('''only integers accepted as input''' ) else: lowercase__ = str(abs(A ) ) lowercase__ = [list(A ) for char in range(len(A ) )] for index in range(len(A ) ): num_transpositions[index].pop(A ) return max( int(''''''.join(list(A ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
2
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Optional[Any] = { 'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json', # See all LeViT models at https://huggingface.co/models?filter=levit } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = """levit""" def __init__(self : str , UpperCamelCase : Union[str, Any]=224 , UpperCamelCase : List[Any]=3 , UpperCamelCase : Dict=3 , UpperCamelCase : Any=2 , UpperCamelCase : Tuple=1 , UpperCamelCase : int=16 , UpperCamelCase : Union[str, Any]=[128, 256, 384] , UpperCamelCase : List[str]=[4, 8, 12] , UpperCamelCase : Optional[Any]=[4, 4, 4] , UpperCamelCase : Optional[int]=[16, 16, 16] , UpperCamelCase : str=0 , UpperCamelCase : Dict=[2, 2, 2] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Any=0.02 , **UpperCamelCase : Dict , ): '''simple docstring''' super().__init__(**UpperCamelCase ) lowercase__ = image_size lowercase__ = num_channels lowercase__ = kernel_size lowercase__ = stride lowercase__ = padding lowercase__ = hidden_sizes lowercase__ = num_attention_heads lowercase__ = depths lowercase__ = key_dim lowercase__ = drop_path_rate lowercase__ = patch_size lowercase__ = attention_ratio lowercase__ = mlp_ratio lowercase__ = initializer_range lowercase__ = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = version.parse("""1.11""" ) @property def UpperCamelCase__ (self : List[str] ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return 1E-4
2
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCamelCase : str = Mapping[str, np.ndarray] lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict. lowerCamelCase : Any = 0.0_1 @dataclasses.dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent lowerCAmelCase__ : Optional[Sequence[int]] = None def _SCREAMING_SNAKE_CASE (A ) -> Protein: """simple docstring""" lowercase__ = R'''(\[[A-Z]+\]\n)''' lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0] lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowercase__ = ["N", "CA", "C"] lowercase__ = None lowercase__ = None lowercase__ = None for g in groups: if "[PRIMARY]" == g[0]: lowercase__ = g[1][0].strip() for i in range(len(A ) ): if seq[i] not in residue_constants.restypes: lowercase__ = '''X''' # FIXME: strings are immutable lowercase__ = np.array( [residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowercase__ = [] for axis in range(3 ): tertiary.append(list(map(A , g[1][axis].split() ) ) ) lowercase__ = np.array(A ) lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowercase__ = np.zeros( ( len(A ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , ) def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) lowercase__ = prot.parents lowercase__ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowercase__ = [p for i, p in zip(A , A ) if i == chain_id] if parents is None or len(A ) == 0: lowercase__ = ['''N/A'''] pdb_headers.append(f"PARENT {' '.join(A )}" ) return pdb_headers def _SCREAMING_SNAKE_CASE (A , A ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = pdb_str.split('''\n''' ) lowercase__ = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) lowercase__ = 42 if prot.parents is not None and len(prot.parents ) > 0: lowercase__ = [] if prot.parents_chain_index is not None: lowercase__ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(A ) , [] ) parent_dict[str(A )].append(A ) lowercase__ = max([int(A ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] ) parents_per_chain.append(A ) else: parents_per_chain.append(list(prot.parents ) ) else: lowercase__ = [['''N/A''']] def make_parent_line(A ) -> str: return f"PARENT {' '.join(A )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowercase__ = 0 for i, l in enumerate(A ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(A ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(A ): lowercase__ = parents_per_chain[chain_counter] else: lowercase__ = ['''N/A'''] out_pdb_lines.append(make_parent_line(A ) ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = residue_constants.restypes + ['''X'''] def res_atoa(A ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowercase__ = residue_constants.atom_types lowercase__ = [] lowercase__ = prot.atom_mask lowercase__ = prot.aatype lowercase__ = prot.atom_positions lowercase__ = prot.residue_index.astype(np.intaa ) lowercase__ = prot.b_factors lowercase__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowercase__ = get_pdb_headers(A ) if len(A ) > 0: pdb_lines.extend(A ) lowercase__ = aatype.shape[0] lowercase__ = 1 lowercase__ = 0 lowercase__ = string.ascii_uppercase lowercase__ = None # Add all atom sites. for i in range(A ): lowercase__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowercase__ = '''ATOM''' lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}" lowercase__ = '''''' lowercase__ = '''''' lowercase__ = 1.00 lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works. lowercase__ = '''''' lowercase__ = '''A''' if chain_index is not None: lowercase__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowercase__ = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(A ) atom_index += 1 lowercase__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowercase__ = True lowercase__ = chain_index[i + 1] if should_terminate: # Close the chain. lowercase__ = '''TER''' lowercase__ = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(A ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(A , A ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
2
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A , A ) -> tuple[float, float]: """simple docstring""" if not len(A ) == len(A ) == 3: raise ValueError('''Please enter a valid equation.''' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('''Both a & b of two equations can\'t be zero.''' ) # Extract the coefficients lowercase__ ,lowercase__ ,lowercase__ = equationa lowercase__ ,lowercase__ ,lowercase__ = equationa # Calculate the determinants of the matrices lowercase__ = aa * ba - aa * ba lowercase__ = ca * ba - ca * ba lowercase__ = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('''Infinite solutions. (Consistent system)''' ) else: raise ValueError('''No solution. (Inconsistent system)''' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: lowercase__ = determinant_x / determinant lowercase__ = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A ) -> list[list[int]]: """simple docstring""" lowercase__ = [] create_all_state(1 , A , A , [] , A ) return result def _SCREAMING_SNAKE_CASE (A , A , A , A , A , ) -> None: """simple docstring""" if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def _SCREAMING_SNAKE_CASE (A ) -> None: """simple docstring""" for i in total_list: print(*A ) if __name__ == "__main__": lowerCamelCase : Tuple = 4 lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = generate_all_combinations(n, k) print_all_state(total_list)
2
1
'''simple docstring''' import operator as op lowerCamelCase : Dict = 'scaler.pt' lowerCamelCase : Optional[Any] = 'pytorch_model' lowerCamelCase : List[Any] = 'random_states' lowerCamelCase : Union[str, Any] = 'optimizer' lowerCamelCase : str = 'scheduler' lowerCamelCase : int = 'pytorch_model.bin' lowerCamelCase : Optional[Any] = 'pytorch_model.bin.index.json' lowerCamelCase : List[Any] = 'model.safetensors' lowerCamelCase : Any = 'model.safetensors.index.json' lowerCamelCase : str = '1.10.2' lowerCamelCase : List[str] = 'py38' lowerCamelCase : List[Any] = '4.17.0' lowerCamelCase : Union[str, Any] = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge'] lowerCamelCase : Optional[int] = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2'] lowerCamelCase : Any = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP'] lowerCamelCase : Tuple = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH'] lowerCamelCase : Tuple = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT'] lowerCamelCase : Optional[int] = '2.0.1' lowerCamelCase : str = ['pdsh', 'standard', 'openmpi', 'mvapich'] lowerCamelCase : str = ['default', 'reduce-overhead', 'max-autotune'] lowerCamelCase : Optional[Any] = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt} # These are the args for `torch.distributed.launch` for pytorch < 1.9 lowerCamelCase : List[Any] = [ 'nnodes', 'nproc_per_node', 'rdzv_backend', 'rdzv_endpoint', 'rdzv_id', 'rdzv_conf', 'standalone', 'max_restarts', 'monitor_interval', 'start_method', 'role', 'module', 'm', 'no_python', 'run_path', 'log_dir', 'r', 'redirects', 't', 'tee', 'node_rank', 'master_addr', 'master_port', ] lowerCamelCase : List[Any] = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM'] lowerCamelCase : Optional[Any] = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
2
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
1
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A ) -> list[str]: """simple docstring""" if nth_term == "": return [""] lowercase__ = int(A ) lowercase__ = int(A ) lowercase__ = [] for temp in range(int(A ) ): series.append(f"1 / {pow(temp + 1 , int(A ) )}" if series else '''1''' ) return series if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase : Optional[Any] = int(input('Enter the last number (nth term) of the P-Series')) lowerCamelCase : Optional[Any] = int(input('Enter the power for P-Series')) print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p') print(p_series(nth_term, power))
2
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCamelCase : str = parser.parse_args() if args.model_type == "bert": lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name) lowerCamelCase : Any = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCamelCase : int = model.state_dict() lowerCamelCase : int = {} for w in ["word_embeddings", "position_embeddings"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] lowerCamelCase : Tuple = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] lowerCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] lowerCamelCase : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] lowerCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] lowerCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] lowerCamelCase : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight'] lowerCamelCase : str = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""] lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
2
1
'''simple docstring''' import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : Tuple = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n' class __lowerCAmelCase (lowercase_ ): '''simple docstring''' @add_start_docstrings(UpperCamelCase ) def __call__(self : int , UpperCamelCase : torch.LongTensor , UpperCamelCase : torch.FloatTensor , **UpperCamelCase : str ): '''simple docstring''' raise NotImplementedError('''StoppingCriteria needs to be subclassed''' ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : str , UpperCamelCase : int , UpperCamelCase : Optional[int] = None ): '''simple docstring''' lowercase__ = max_length lowercase__ = max_position_embeddings @add_start_docstrings(UpperCamelCase ) def __call__(self : Any , UpperCamelCase : torch.LongTensor , UpperCamelCase : torch.FloatTensor , **UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = input_ids.shape[-1] lowercase__ = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( '''This is a friendly reminder - the current text generation call will exceed the model\'s predefined ''' f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe " '''exceptions, performance degradation, or nothing at all.''' ) return is_done class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' warnings.warn( '''The class `MaxNewTokensCriteria` is deprecated. ''' f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` " '''with `max_length = start_length + max_new_tokens` instead.''' , UpperCamelCase , ) lowercase__ = start_length lowercase__ = max_new_tokens lowercase__ = start_length + max_new_tokens @add_start_docstrings(UpperCamelCase ) def __call__(self : Dict , UpperCamelCase : torch.LongTensor , UpperCamelCase : torch.FloatTensor , **UpperCamelCase : Dict ): '''simple docstring''' return input_ids.shape[-1] >= self.max_length class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] , UpperCamelCase : float , UpperCamelCase : Optional[float] = None ): '''simple docstring''' lowercase__ = max_time lowercase__ = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(UpperCamelCase ) def __call__(self : List[Any] , UpperCamelCase : torch.LongTensor , UpperCamelCase : torch.FloatTensor , **UpperCamelCase : int ): '''simple docstring''' return time.time() - self.initial_timestamp > self.max_time class __lowerCAmelCase (lowercase_ ): '''simple docstring''' @add_start_docstrings(UpperCamelCase ) def __call__(self : str , UpperCamelCase : torch.LongTensor , UpperCamelCase : torch.FloatTensor , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return any(criteria(UpperCamelCase , UpperCamelCase ) for criteria in self ) @property def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' for stopping_criterium in self: if isinstance(UpperCamelCase , UpperCamelCase ): return stopping_criterium.max_length elif isinstance(UpperCamelCase , UpperCamelCase ): return stopping_criterium.max_length return None def _SCREAMING_SNAKE_CASE (A , A ) -> StoppingCriteriaList: """simple docstring""" lowercase__ = stopping_criteria.max_length lowercase__ = deepcopy(A ) if stopping_max_length is not None and stopping_max_length != max_length: warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , A ) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=A ) ) return new_stopping_criteria
2
'''simple docstring''' from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ): '''simple docstring''' lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCamelCase : Dict = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = ['DeiTFeatureExtractor'] lowerCamelCase : Optional[int] = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : List[str] = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[Any] = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Tuple = """cvt""" def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) lowercase__ = num_channels lowercase__ = patch_sizes lowercase__ = patch_stride lowercase__ = patch_padding lowercase__ = embed_dim lowercase__ = num_heads lowercase__ = depth lowercase__ = mlp_ratio lowercase__ = attention_drop_rate lowercase__ = drop_rate lowercase__ = drop_path_rate lowercase__ = qkv_bias lowercase__ = cls_token lowercase__ = qkv_projection_method lowercase__ = kernel_qkv lowercase__ = padding_kv lowercase__ = stride_kv lowercase__ = padding_q lowercase__ = stride_q lowercase__ = initializer_range lowercase__ = layer_norm_eps
2
1
'''simple docstring''' import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = MobileBertConfig.from_json_file(A ) print(f"Building PyTorch model from configuration: {config}" ) lowercase__ = MobileBertForPreTraining(A ) # Load weights from tf checkpoint lowercase__ = load_tf_weights_in_mobilebert(A , A , A ) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , A ) if __name__ == "__main__": lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--mobilebert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained MobileBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowerCamelCase : Dict = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
2
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowerCamelCase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) lowerCamelCase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowerCamelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowerCamelCase : str = np.expand_dims(test_image, axis=0) lowerCamelCase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowerCamelCase : Any = 'Normal' if result[0][0] == 1: lowerCamelCase : Any = 'Abnormality detected'
2
1
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) lowerCamelCase : Dict = parser.parse_args() lowerCamelCase : List[str] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
2
'''simple docstring''' class __lowerCAmelCase : # Public class to implement a graph '''simple docstring''' def __init__(self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = row lowercase__ = col lowercase__ = graph def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): # And finally, count all islands. '''simple docstring''' lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += 1 return count
2
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A = 600_851_475_143 ) -> int: """simple docstring""" try: lowercase__ = int(A ) except (TypeError, ValueError): raise TypeError('''Parameter n must be int or castable to int.''' ) if n <= 0: raise ValueError('''Parameter n must be greater than or equal to one.''' ) lowercase__ = 2 lowercase__ = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 lowercase__ = i while n % i == 0: lowercase__ = n // i i += 1 return int(A ) if __name__ == "__main__": print(f"""{solution() = }""")
2
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : List[Any] = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) lowerCamelCase : Union[str, Any] = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", f"""decoder.layers.{i}.sa_qcontent_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", f"""decoder.layers.{i}.sa_kcontent_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", f"""decoder.layers.{i}.sa_qpos_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", f"""decoder.layers.{i}.sa_kpos_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.weight""", f"""decoder.layers.{i}.sa_v_proj.weight""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", f"""decoder.layers.{i}.ca_qcontent_proj.weight""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", f"""decoder.layers.{i}.ca_kcontent_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", f"""decoder.layers.{i}.ca_kpos_proj.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.weight""", f"""decoder.layers.{i}.ca_v_proj.weight""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", f"""decoder.layers.{i}.ca_qpos_sine_proj.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", f"""decoder.layers.{i}.sa_qcontent_proj.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", f"""decoder.layers.{i}.sa_kcontent_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", f"""decoder.layers.{i}.sa_qpos_proj.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", f"""decoder.layers.{i}.sa_kpos_proj.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.sa_v_proj.bias""", f"""decoder.layers.{i}.sa_v_proj.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", f"""decoder.layers.{i}.ca_qcontent_proj.bias""") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", f"""decoder.layers.{i}.ca_kcontent_proj.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", f"""decoder.layers.{i}.ca_kpos_proj.bias""")) rename_keys.append((f"""transformer.decoder.layers.{i}.ca_v_proj.bias""", f"""decoder.layers.{i}.ca_v_proj.bias""")) rename_keys.append( (f"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", f"""decoder.layers.{i}.ca_qpos_sine_proj.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[int]: """simple docstring""" lowercase__ = state_dict.pop(A ) lowercase__ = val def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowercase__ = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' ) lowercase__ = value else: lowercase__ = value return new_state_dict def _SCREAMING_SNAKE_CASE (A , A=False ) -> Any: """simple docstring""" lowercase__ = '''''' if is_panoptic: lowercase__ = '''conditional_detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowercase__ = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" ) lowercase__ = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" ) # next, add query, keys and values (in that order) to the state dict lowercase__ = in_proj_weight[:256, :] lowercase__ = in_proj_bias[:256] lowercase__ = in_proj_weight[256:512, :] lowercase__ = in_proj_bias[256:512] lowercase__ = in_proj_weight[-256:, :] lowercase__ = in_proj_bias[-256:] def _SCREAMING_SNAKE_CASE () -> Any: """simple docstring""" lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ = Image.open(requests.get(A , stream=A ).raw ) return im @torch.no_grad() def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" lowercase__ = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: lowercase__ = '''resnet101''' if "dc5" in model_name: lowercase__ = True lowercase__ = '''panoptic''' in model_name if is_panoptic: lowercase__ = 250 else: lowercase__ = 91 lowercase__ = '''huggingface/label-files''' lowercase__ = '''coco-detection-id2label.json''' lowercase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) ) lowercase__ = {int(A ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} # load image processor lowercase__ = '''coco_panoptic''' if is_panoptic else '''coco_detection''' lowercase__ = ConditionalDetrImageProcessor(format=A ) # prepare image lowercase__ = prepare_img() lowercase__ = image_processor(images=A , return_tensors='''pt''' ) lowercase__ = encoding['''pixel_values'''] logger.info(f"Converting model {model_name}..." ) # load original model from torch hub lowercase__ = torch.hub.load('''DeppMeng/ConditionalDETR''' , A , pretrained=A ).eval() lowercase__ = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: lowercase__ = '''conditional_detr.''' + src rename_key(A , A , A ) lowercase__ = rename_backbone_keys(A ) # query, key and value matrices need special treatment read_in_q_k_v(A , is_panoptic=A ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowercase__ = '''conditional_detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('''conditional_detr''' ) and not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ) ): lowercase__ = state_dict.pop(A ) lowercase__ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowercase__ = state_dict.pop(A ) lowercase__ = val elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ): continue else: lowercase__ = state_dict.pop(A ) lowercase__ = val else: if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ): lowercase__ = state_dict.pop(A ) lowercase__ = val # finally, create HuggingFace model and load state dict lowercase__ = ConditionalDetrForSegmentation(A ) if is_panoptic else ConditionalDetrForObjectDetection(A ) model.load_state_dict(A ) model.eval() model.push_to_hub(repo_id=A , organization='''DepuMeng''' , commit_message='''Add model''' ) # verify our conversion lowercase__ = conditional_detr(A ) lowercase__ = model(A ) assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 ) # Save model and image processor logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(A ).mkdir(exist_ok=A ) model.save_pretrained(A ) image_processor.save_pretrained(A ) if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) lowerCamelCase : List[str] = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" return len(set(A ) ) == len(A ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
'''simple docstring''' from __future__ import annotations import math def _SCREAMING_SNAKE_CASE (A ) -> list[int]: """simple docstring""" if num <= 0: lowercase__ = f"{num}: Invalid input, please enter a positive integer." raise ValueError(A ) lowercase__ = [True] * (num + 1) lowercase__ = [] lowercase__ = 2 lowercase__ = int(math.sqrt(A ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(A ) # Set multiples of start be False for i in range(start * start , num + 1 , A ): if sieve[i] is True: lowercase__ = False start += 1 for j in range(end + 1 , num + 1 ): if sieve[j] is True: prime.append(A ) return prime if __name__ == "__main__": print(prime_sieve(int(input('Enter a positive integer: ').strip())))
2
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase : Any = None lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : List[str] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase : Any = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""] lowerCAmelCase__ : Optional[int] = TaTokenizer lowerCAmelCase__ : List[int] = [] def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True lowercase__ = extra_ids @staticmethod def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , ) return max_model_length def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowercase__ = os.path.join( UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) logger.info(f"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return list( set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
2
1
'''simple docstring''' import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[Any] = """Speech2TextFeatureExtractor""" lowerCAmelCase__ : Tuple = """Speech2TextTokenizer""" def __init__(self : Dict , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) lowercase__ = self.feature_extractor lowercase__ = False def __call__(self : Dict , *UpperCamelCase : Optional[Any] , **UpperCamelCase : List[Any] ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*UpperCamelCase , **UpperCamelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) lowercase__ = kwargs.pop('''raw_speech''' ) else: lowercase__ = kwargs.pop('''audio''' , UpperCamelCase ) lowercase__ = kwargs.pop('''sampling_rate''' , UpperCamelCase ) lowercase__ = kwargs.pop('''text''' , UpperCamelCase ) if len(UpperCamelCase ) > 0: lowercase__ = args[0] lowercase__ = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: lowercase__ = self.feature_extractor(UpperCamelCase , *UpperCamelCase , sampling_rate=UpperCamelCase , **UpperCamelCase ) if text is not None: lowercase__ = self.tokenizer(UpperCamelCase , **UpperCamelCase ) if text is None: return inputs elif audio is None: return encodings else: lowercase__ = encodings['''input_ids'''] return inputs def UpperCamelCase__ (self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] , *UpperCamelCase : Dict , **UpperCamelCase : List[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @contextmanager def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) lowercase__ = True lowercase__ = self.tokenizer yield lowercase__ = self.feature_extractor lowercase__ = False
2
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ShapEImgaImgPipeline lowerCAmelCase__ : List[str] = ["""image"""] lowerCAmelCase__ : Any = ["""image"""] lowerCAmelCase__ : Any = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowerCAmelCase__ : Tuple = False @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : str ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return 8 @property def UpperCamelCase__ (self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ = CLIPVisionModel(UpperCamelCase ) return model @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def UpperCamelCase__ (self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowercase__ = PriorTransformer(**UpperCamelCase ) return model @property def UpperCamelCase__ (self : int ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**UpperCamelCase ) return model def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.dummy_prior lowercase__ = self.dummy_image_encoder lowercase__ = self.dummy_image_processor lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , ) lowercase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = torch_device == '''cpu''' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) lowercase__ = pipe( UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = MgpstrTokenizer lowerCAmelCase__ : Dict = False lowerCAmelCase__ : str = {} lowerCAmelCase__ : int = False def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().setUp() # fmt: off lowercase__ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on lowercase__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase ) + '''\n''' ) def UpperCamelCase__ (self : Tuple , **UpperCamelCase : Dict ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase ) def UpperCamelCase__ (self : List[str] , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = '''tester''' lowercase__ = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' pass def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = self.get_tokenizers(do_lower_case=UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowercase__ = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) lowercase__ = tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase ) self.assertEqual(len(UpperCamelCase ) , 1 ) lowercase__ = tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) self.assertTrue(special_token not in decoded ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}" ): lowercase__ ,lowercase__ = self.get_input_output_texts(UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) lowercase__ = tokenizer.convert_tokens_to_ids(UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertNotEqual(len(UpperCamelCase ) , 0 ) lowercase__ = tokenizer.decode(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , UpperCamelCase ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass
2
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : str = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
1
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = DebertaVaTokenizer lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast lowerCAmelCase__ : str = True lowerCAmelCase__ : Tuple = True def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = '''this is a test''' lowercase__ = '''this is a test''' return input_text, output_text def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''<pad>''' lowercase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(UpperCamelCase ) , 30001 ) def UpperCamelCase__ (self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''This is a test''' lowercase__ = [13, 1, 4398, 25, 21, 1289] lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # fmt: off lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DebertaVaTokenizer(UpperCamelCase ) lowercase__ = tokenizer.encode('''sequence builders''' ) lowercase__ = tokenizer.encode('''multi-sequence build''' ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : List[Any] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = """realm""" def __init__(self : str , UpperCamelCase : List[Any]=30522 , UpperCamelCase : List[Any]=768 , UpperCamelCase : int=128 , UpperCamelCase : Any=12 , UpperCamelCase : Tuple=12 , UpperCamelCase : List[Any]=8 , UpperCamelCase : Union[str, Any]=3072 , UpperCamelCase : List[str]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=512 , UpperCamelCase : Dict=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[Any]=1E-12 , UpperCamelCase : Dict=256 , UpperCamelCase : Union[str, Any]=10 , UpperCamelCase : Optional[int]=1E-3 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[int]=320 , UpperCamelCase : List[str]=13353718 , UpperCamelCase : Optional[Any]=5000 , UpperCamelCase : str=1 , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : List[Any]=2 , **UpperCamelCase : int , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) # Common config lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = retriever_proj_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = num_candidates lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = initializer_range lowercase__ = type_vocab_size lowercase__ = layer_norm_eps # Reader config lowercase__ = span_hidden_size lowercase__ = max_span_width lowercase__ = reader_layer_norm_eps lowercase__ = reader_beam_size lowercase__ = reader_seq_len # Retrieval config lowercase__ = num_block_records lowercase__ = searcher_beam_size
2
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A , A ) -> list[str]: """simple docstring""" return [sentence[i : i + ngram_size] for i in range(len(A ) - ngram_size + 1 )] if __name__ == "__main__": from doctest import testmod testmod()
2
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : str = logging.get_logger(__name__) lowerCamelCase : int = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = """mvp""" lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""] lowerCAmelCase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__(self : Any , UpperCamelCase : Optional[int]=50267 , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=12 , UpperCamelCase : Tuple=4096 , UpperCamelCase : Dict=16 , UpperCamelCase : int=12 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Union[str, Any]=1024 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=1 , UpperCamelCase : int=0 , UpperCamelCase : int=2 , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=False , UpperCamelCase : int=100 , UpperCamelCase : Optional[Any]=800 , **UpperCamelCase : str , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = classifier_dropout lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = use_prompt lowercase__ = prompt_length lowercase__ = prompt_mid_dim super().__init__( pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , ) if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ): lowercase__ = self.bos_token_id warnings.warn( f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " '''The config can simply be saved and uploaded again to be fixed.''' )
2
1
'''simple docstring''' import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def _SCREAMING_SNAKE_CASE (A , A , A , A , A , A ) -> np.ndarray: """simple docstring""" if (ksize % 2) == 0: lowercase__ = ksize + 1 lowercase__ = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(A ): for x in range(A ): # distance from center lowercase__ = x - ksize // 2 lowercase__ = y - ksize // 2 # degree to radiant lowercase__ = theta / 180 * np.pi lowercase__ = np.cos(_theta ) lowercase__ = np.sin(_theta ) # get kernel x lowercase__ = cos_theta * px + sin_theta * py # get kernel y lowercase__ = -sin_theta * px + cos_theta * py # fill kernel lowercase__ = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image lowerCamelCase : Tuple = imread('../image_data/lena.jpg') # turn image in gray scale value lowerCamelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges lowerCamelCase : int = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 120, 150]: lowerCamelCase : Any = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) lowerCamelCase : List[str] = out / out.max() * 255 lowerCamelCase : int = out.astype(np.uinta) imshow('Original', gray) imshow('Gabor filter with 20x20 mask and 6 directions', out) waitKey(0)
2
'''simple docstring''' import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : List[str] = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = DebertaVaTokenizer lowerCAmelCase__ : List[Any] = DebertaVaTokenizerFast lowerCAmelCase__ : str = True lowerCAmelCase__ : Tuple = True def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase__ = DebertaVaTokenizer(UpperCamelCase , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' lowercase__ = '''this is a test''' lowercase__ = '''this is a test''' return input_text, output_text def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''<pad>''' lowercase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(UpperCamelCase ) , 30001 ) def UpperCamelCase__ (self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = ''' \tHeLLo!how \n Are yoU? ''' lowercase__ = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase__ = DebertaVaTokenizer(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , do_lower_case=UpperCamelCase , split_by_punct=UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = '''This is a test''' lowercase__ = [13, 1, 4398, 25, 21, 1289] lowercase__ = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase__ = DebertaVaTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = DebertaVaTokenizerFast(UpperCamelCase , keep_accents=UpperCamelCase ) lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) # fmt: off lowercase__ = '''I was born in 92000, and this is falsé.''' lowercase__ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase__ = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase__ = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase__ = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) lowercase__ = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DebertaVaTokenizer(UpperCamelCase ) lowercase__ = tokenizer.encode('''sequence builders''' ) lowercase__ = tokenizer.encode('''multi-sequence build''' ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) lowercase__ = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase , ) @slow def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
2
1
'''simple docstring''' import collections import os import re from pathlib import Path lowerCamelCase : Optional[Any] = 'src/transformers' # Matches is_xxx_available() lowerCamelCase : Union[str, Any] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} lowerCamelCase : int = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCamelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available lowerCamelCase : int = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") lowerCamelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCamelCase : Optional[Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", lowerCamelCase : Any = re.compile(R'^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], lowerCamelCase : List[Any] = re.compile(R'^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo lowerCamelCase : Union[str, Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: lowerCamelCase : Union[str, Any] = re.compile(R'^\s*try:') # Catches a line with else: lowerCamelCase : Tuple = re.compile(R'^\s*else:') def _SCREAMING_SNAKE_CASE (A ) -> Union[str, Any]: """simple docstring""" if _re_test_backend.search(A ) is None: return None lowercase__ = [b[0] for b in _re_backend.findall(A )] backends.sort() return "_and_".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" with open(A , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowercase__ = f.readlines() lowercase__ = 0 while line_index < len(A ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(A ): return None # First grab the objects without a specific backend in _import_structure lowercase__ = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowercase__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A ): lowercase__ = _re_one_line_import_struct.search(A ).groups()[0] lowercase__ = re.findall(R'''\[([^\]]+)\]''' , A ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowercase__ = _re_import_struct_key_value.search(A ) if single_line_import_search is not None: lowercase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A ) > 0] objects.extend(A ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowercase__ = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. lowercase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowercase__ = lines[line_index] if _re_import_struct_add_one.search(A ) is not None: objects.append(_re_import_struct_add_one.search(A ).groups()[0] ) elif _re_import_struct_add_many.search(A ) is not None: lowercase__ = _re_import_struct_add_many.search(A ).groups()[0].split(''', ''' ) lowercase__ = [obj[1:-1] for obj in imports if len(A ) > 0] objects.extend(A ) elif _re_between_brackets.search(A ) is not None: lowercase__ = _re_between_brackets.search(A ).groups()[0].split(''', ''' ) lowercase__ = [obj[1:-1] for obj in imports if len(A ) > 0] objects.extend(A ) elif _re_quote_object.search(A ) is not None: objects.append(_re_quote_object.search(A ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowercase__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowercase__ = [] while ( line_index < len(A ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowercase__ = lines[line_index] lowercase__ = _re_import.search(A ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowercase__ = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(A ): # If the line is an if is_backend_available, we grab all objects associated. lowercase__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowercase__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowercase__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowercase__ = lines[line_index] lowercase__ = _re_import.search(A ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowercase__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" def find_duplicates(A ): return [k for k, v in collections.Counter(A ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowercase__ = [] for key in import_dict_objects.keys(): lowercase__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" ) lowercase__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowercase__ = '''base imports''' if key == '''none''' else f"{key} backend" errors.append(f"Differences for {name}:" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f" {a} in TYPE_HINT but not in _import_structure." ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f" {a} in _import_structure but not in TYPE_HINT." ) return errors def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = [] for root, _, files in os.walk(A ): if "__init__.py" in files: lowercase__ = os.path.join(A , '''__init__.py''' ) lowercase__ = parse_init(A ) if objects is not None: lowercase__ = analyze_results(*A ) if len(A ) > 0: lowercase__ = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}" failures.append('''\n'''.join(A ) ) if len(A ) > 0: raise ValueError('''\n\n'''.join(A ) ) def _SCREAMING_SNAKE_CASE () -> Dict: """simple docstring""" lowercase__ = [] for path, directories, files in os.walk(A ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(A ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A ) / folder).glob('''*.py''' ) ) ) == 0: continue lowercase__ = str((Path(A ) / folder).relative_to(A ) ) lowercase__ = short_path.replace(os.path.sep , '''.''' ) submodules.append(A ) for fname in files: if fname == "__init__.py": continue lowercase__ = str((Path(A ) / fname).relative_to(A ) ) lowercase__ = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(A ) return submodules lowerCamelCase : List[Any] = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', 'models.esm.openfold_utils', ] def _SCREAMING_SNAKE_CASE () -> str: """simple docstring""" from transformers.utils import direct_transformers_import lowercase__ = direct_transformers_import(A ) lowercase__ = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(A , '''__init__.py''' ) , '''r''' ) as f: lowercase__ = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , A ) ) ) lowercase__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(A ) > 0: lowercase__ = '''\n'''.join(f"- {module}" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registed in the main init of Transformers:\n''' f"{list_of_modules}\n" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
2
'''simple docstring''' import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]: """simple docstring""" lowercase__ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''_float_tensor''', '''decoder.output_projection.weight''', ] for k in ignore_keys: state_dict.pop(A , A ) def _SCREAMING_SNAKE_CASE (A ) -> List[str]: """simple docstring""" lowercase__ ,lowercase__ = emb.weight.shape lowercase__ = nn.Linear(A , A , bias=A ) lowercase__ = emb.weight.data return lin_layer def _SCREAMING_SNAKE_CASE (A , A="facebook/mbart-large-en-ro" , A=False , A=False ) -> Union[str, Any]: """simple docstring""" lowercase__ = torch.load(A , map_location='''cpu''' )['''model'''] remove_ignore_keys_(A ) lowercase__ = state_dict['''encoder.embed_tokens.weight'''].shape[0] lowercase__ = MBartConfig.from_pretrained(A , vocab_size=A ) if mbart_aa and finetuned: lowercase__ = '''relu''' lowercase__ = state_dict['''decoder.embed_tokens.weight'''] lowercase__ = MBartForConditionalGeneration(A ) model.model.load_state_dict(A ) if finetuned: lowercase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCamelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') lowerCamelCase : Any = parser.parse_args() lowerCamelCase : List[str] = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
2
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = """bert-generation""" def __init__(self : List[str] , UpperCamelCase : Tuple=50358 , UpperCamelCase : Dict=1024 , UpperCamelCase : List[Any]=24 , UpperCamelCase : int=16 , UpperCamelCase : List[str]=4096 , UpperCamelCase : Union[str, Any]="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=512 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : str=1E-12 , UpperCamelCase : Optional[int]=0 , UpperCamelCase : Tuple=2 , UpperCamelCase : List[str]=1 , UpperCamelCase : Dict="absolute" , UpperCamelCase : Tuple=True , **UpperCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache
2
'''simple docstring''' import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowerCamelCase : List[Any] = logging.getLogger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : Any=-1 ): '''simple docstring''' lowercase__ = label_idx def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: lowercase__ = [] lowercase__ = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 lowercase__ = [] lowercase__ = [] else: lowercase__ = line.split(''' ''' ) words.append(splits[0] ) if len(UpperCamelCase ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) return examples def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(UpperCamelCase ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: lowercase__ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(UpperCamelCase ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : List[Any] ): '''simple docstring''' super().__init__(label_idx=-2 ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: lowercase__ = f.read().splitlines() if "O" not in labels: lowercase__ = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple , UpperCamelCase : int , UpperCamelCase : Union[Split, str] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): lowercase__ = mode.value lowercase__ = os.path.join(UpperCamelCase , f"{mode}.txt" ) lowercase__ = 1 lowercase__ = [] with open(UpperCamelCase , encoding='''utf-8''' ) as f: for sentence in parse_incr(UpperCamelCase ): lowercase__ = [] lowercase__ = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(UpperCamelCase ) == len(UpperCamelCase ) if words: examples.append(InputExample(guid=f"{mode}-{guid_index}" , words=UpperCamelCase , labels=UpperCamelCase ) ) guid_index += 1 return examples def UpperCamelCase__ (self : Tuple , UpperCamelCase : TextIO , UpperCamelCase : TextIO , UpperCamelCase : List ): '''simple docstring''' lowercase__ = 0 for sentence in parse_incr(UpperCamelCase ): lowercase__ = preds_list[example_id] lowercase__ = '''''' for token in sentence: out += f"{token['form']} ({token['upos']}|{s_p.pop(0 )}) " out += "\n" writer.write(UpperCamelCase ) example_id += 1 def UpperCamelCase__ (self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' if path: with open(UpperCamelCase , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
2
1
'''simple docstring''' import itertools import random import unittest import numpy as np from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor from transformers.testing_utils import require_torch, slow from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowerCamelCase : List[str] = random.Random() def _SCREAMING_SNAKE_CASE (A , A=1.0 , A=None , A=None ) -> List[Any]: """simple docstring""" if rng is None: lowercase__ = global_rng lowercase__ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def __init__(self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any]=7 , UpperCamelCase : Union[str, Any]=400 , UpperCamelCase : List[Any]=2000 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Dict=0.0 , UpperCamelCase : int=16000 , UpperCamelCase : Dict=True , UpperCamelCase : List[str]=True , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = min_seq_length lowercase__ = max_seq_length lowercase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowercase__ = feature_size lowercase__ = padding_value lowercase__ = sampling_rate lowercase__ = return_attention_mask lowercase__ = do_normalize def UpperCamelCase__ (self : int ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : Dict=False , UpperCamelCase : Tuple=False ): '''simple docstring''' def _flatten(UpperCamelCase : Dict ): return list(itertools.chain(*UpperCamelCase ) ) if equal_length: lowercase__ = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size lowercase__ = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowercase__ = [np.asarray(UpperCamelCase ) for x in speech_inputs] return speech_inputs class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = WavaVecaFeatureExtractor def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = WavaVecaFeatureExtractionTester(self ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Tuple ): '''simple docstring''' self.assertTrue(np.all(np.mean(UpperCamelCase , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(UpperCamelCase , axis=0 ) - 1 ) < 1E-3 ) ) def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ = [np.asarray(UpperCamelCase ) for speech_input in speech_inputs] # Test not batched input lowercase__ = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values lowercase__ = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) ) # Test batched lowercase__ = feat_extract(UpperCamelCase , return_tensors='''np''' ).input_values lowercase__ = feat_extract(UpperCamelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(UpperCamelCase , UpperCamelCase ): self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. lowercase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)] lowercase__ = np.asarray(UpperCamelCase ) lowercase__ = feat_extract(UpperCamelCase , return_tensors='''np''' ).input_values lowercase__ = feat_extract(UpperCamelCase , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(UpperCamelCase , UpperCamelCase ): self.assertTrue(np.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) ) def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ = ['''longest''', '''max_length''', '''do_not_pad'''] lowercase__ = [None, 1600, None] for max_length, padding in zip(UpperCamelCase , UpperCamelCase ): lowercase__ = feat_extract(UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , return_tensors='''np''' ) lowercase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self.assertTrue(input_values[0][800:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self.assertTrue(input_values[0][1000:].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = range(800 , 1400 , 200 ) lowercase__ = [floats_list((1, x) )[0] for x in lengths] lowercase__ = ['''longest''', '''max_length''', '''do_not_pad'''] lowercase__ = [None, 1600, None] for max_length, padding in zip(UpperCamelCase , UpperCamelCase ): lowercase__ = feat_extract(UpperCamelCase , max_length=UpperCamelCase , padding=UpperCamelCase ) lowercase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800] ) self._check_zero_mean_unit_variance(input_values[1][:1000] ) self._check_zero_mean_unit_variance(input_values[2][:1200] ) def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ = feat_extract( UpperCamelCase , truncation=UpperCamelCase , max_length=1000 , padding='''max_length''' , return_tensors='''np''' ) lowercase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1] ) self._check_zero_mean_unit_variance(input_values[2] ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ = feat_extract( UpperCamelCase , truncation=UpperCamelCase , max_length=1000 , padding='''longest''' , return_tensors='''np''' ) lowercase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000) ) lowercase__ = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] lowercase__ = feat_extract( UpperCamelCase , truncation=UpperCamelCase , max_length=2000 , padding='''longest''' , return_tensors='''np''' ) lowercase__ = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800] ) self._check_zero_mean_unit_variance(input_values[1, :1000] ) self._check_zero_mean_unit_variance(input_values[2] ) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200) ) @require_torch def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' import torch lowercase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) lowercase__ = np.random.rand(100 ).astype(np.floataa ) lowercase__ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: lowercase__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) lowercase__ = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) @slow @require_torch def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST: lowercase__ = WavaVecaConfig.from_pretrained(UpperCamelCase ) lowercase__ = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase ) # only "layer" feature extraction norm should make use of # attention_mask self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == '''layer''' )
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Union[str, Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = """megatron-bert""" def __init__(self : Tuple , UpperCamelCase : Optional[int]=29056 , UpperCamelCase : Optional[Any]=1024 , UpperCamelCase : Any=24 , UpperCamelCase : int=16 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : int="gelu" , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=2 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Optional[int]="absolute" , UpperCamelCase : List[Any]=True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache
2
1
'''simple docstring''' import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : int = IFInpaintingPipeline lowerCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} lowerCAmelCase__ : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowerCAmelCase__ : List[str] = PipelineTesterMixin.required_optional_params - {"""latents"""} def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return self._get_dummy_components() def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=0 ): '''simple docstring''' if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) lowercase__ = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1 ) def UpperCamelCase__ (self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_save_load_local() def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
2
'''simple docstring''' # Lint as: python3 import itertools import os import re lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])') lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])') lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)') lowerCamelCase : List[Any] = re.compile(R'(_{2,})') lowerCamelCase : str = R'^\w+(\.\w+)*$' lowerCamelCase : Dict = R'<>:/\|?*' def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A ) lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A ) return name.lower() def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" lowercase__ = _single_underscore_re.split(A ) lowercase__ = [_multiple_underscores_re.split(A ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' ) def _SCREAMING_SNAKE_CASE (A ) -> Tuple: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) return camelcase_to_snakecase(A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" if os.path.basename(A ) != name: raise ValueError(f"Should be a dataset name, not a path: {name}" ) if not re.match(_split_re , A ): raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." ) return f"{filename_prefix_for_name(A )}-{split}" def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) if filetype_suffix: prefix += f".{filetype_suffix}" lowercase__ = os.path.join(A , A ) return f"{filepath}*" def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]: """simple docstring""" lowercase__ = filename_prefix_for_split(A , A ) lowercase__ = os.path.join(A , A ) if shard_lengths: lowercase__ = len(A ) lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )] if filetype_suffix: lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames] return filenames else: lowercase__ = prefix if filetype_suffix: filename += f".{filetype_suffix}" return [filename]
2
1
'''simple docstring''' import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class __lowerCAmelCase (nn.Module ): '''simple docstring''' def __init__(self : int , UpperCamelCase : nn.Module , UpperCamelCase : int ): '''simple docstring''' super().__init__() lowercase__ = module lowercase__ = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase , bias=UpperCamelCase ) , nn.Linear(UpperCamelCase , module.out_features , bias=UpperCamelCase ) , ) lowercase__ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def UpperCamelCase__ (self : Any , UpperCamelCase : str , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.module(UpperCamelCase , *UpperCamelCase , **UpperCamelCase ) + self.adapter(UpperCamelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = """bigscience/bloom-1b7""" # Constant values lowerCAmelCase__ : Optional[Any] = 2.109_659_552_692_574 lowerCAmelCase__ : Union[str, Any] = """Hello my name is""" lowerCAmelCase__ : Tuple = set() EXPECTED_OUTPUTS.add("""Hello my name is John and I am a professional photographer. I""" ) EXPECTED_OUTPUTS.add("""Hello my name is John.\nI am a friend of your father.\n""" ) EXPECTED_OUTPUTS.add("""Hello my name is John Doe, I am a student at the University""" ) lowerCAmelCase__ : Union[str, Any] = 10 def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained(self.model_name ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' super().setUp() # Models and tokenizer lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='''auto''' ) lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) def UpperCamelCase__ (self : int ): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase , '''quantization_config''' ) ) lowercase__ = config.to_dict() lowercase__ = config.to_diff_dict() lowercase__ = config.to_json_string() def UpperCamelCase__ (self : str ): '''simple docstring''' from bitsandbytes.nn import Paramsabit lowercase__ = self.model_fpaa.get_memory_footprint() lowercase__ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) lowercase__ = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def UpperCamelCase__ (self : int ): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(UpperCamelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ) lowercase__ = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = BitsAndBytesConfig() lowercase__ = True lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase , device_map='''auto''' ) lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ) lowercase__ = model_abit_from_config.generate( input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase ) , self.EXPECTED_OUTPUTS ) def UpperCamelCase__ (self : int ): '''simple docstring''' with self.assertRaises(UpperCamelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = BitsAndBytesConfig() with self.assertRaises(UpperCamelCase ): lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase , load_in_abit=UpperCamelCase , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , ) def UpperCamelCase__ (self : int ): '''simple docstring''' with self.assertRaises(UpperCamelCase ): # Tries with `str` self.model_abit.to('''cpu''' ) with self.assertRaises(UpperCamelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(UpperCamelCase ): # Tries with a `device` self.model_abit.to(torch.device('''cuda:0''' ) ) with self.assertRaises(UpperCamelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ) lowercase__ = self.model_fpaa.to(torch.floataa ) lowercase__ = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) # Check this does not throw an error lowercase__ = self.model_fpaa.to('''cpu''' ) # Check this does not throw an error lowercase__ = self.model_fpaa.half() # Check this does not throw an error lowercase__ = self.model_fpaa.float() def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=UpperCamelCase , device_map='''auto''' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase__ (cls : Dict ): '''simple docstring''' lowercase__ = '''t5-small''' lowercase__ = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense lowercase__ = AutoTokenizer.from_pretrained(cls.model_name ) lowercase__ = '''Translate in German: Hello, my dog is cute''' def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' from transformers import TaForConditionalGeneration lowercase__ = TaForConditionalGeneration._keep_in_fpaa_modules lowercase__ = None # test with `t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase__ = model.generate(**UpperCamelCase ) # test with `flan-t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase__ = model.generate(**UpperCamelCase ) lowercase__ = modules def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase__ = model.generate(**UpperCamelCase ) # test with `flan-t5-small` lowercase__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 ) lowercase__ = model.generate(**UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Dict ): '''simple docstring''' super().setUp() # model_name lowercase__ = '''bigscience/bloom-560m''' lowercase__ = '''t5-small''' # Different types of model lowercase__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) # Sequence classification model lowercase__ = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) # CausalLM model lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) # Seq2seq model lowercase__ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase , device_map='''auto''' ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : List[str] ): '''simple docstring''' super().setUp() def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = pipeline( '''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass lowercase__ = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Tuple ): '''simple docstring''' super().setUp() def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase , device_map='''balanced''' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model lowercase__ = self.tokenizer(self.input_text , return_tensors='''pt''' ) # Second real batch lowercase__ = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase ) , self.EXPECTED_OUTPUTS ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = '''facebook/opt-350m''' super().setUp() def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ): return # Step 1: freeze all parameters lowercase__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): lowercase__ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability lowercase__ = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase ) ): lowercase__ = LoRALayer(module.q_proj , rank=16 ) lowercase__ = LoRALayer(module.k_proj , rank=16 ) lowercase__ = LoRALayer(module.v_proj , rank=16 ) # Step 3: dummy batch lowercase__ = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowercase__ = model.forward(**UpperCamelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase , UpperCamelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(UpperCamelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = """gpt2-xl""" lowerCAmelCase__ : Dict = 3.3_191_854_854_152_187
2
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class __lowerCAmelCase : '''simple docstring''' def __init__(self : str , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Tuple=16 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : List[Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : str=True , UpperCamelCase : Tuple=False , UpperCamelCase : str=True , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Tuple=30 , UpperCamelCase : str=0 , UpperCamelCase : Tuple=1 , UpperCamelCase : List[Any]=2 , UpperCamelCase : str=None , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = decoder_seq_length # For common tests lowercase__ = self.decoder_seq_length lowercase__ = is_training lowercase__ = use_attention_mask lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_model lowercase__ = decoder_layers lowercase__ = decoder_layers lowercase__ = decoder_ffn_dim lowercase__ = decoder_attention_heads lowercase__ = decoder_attention_heads lowercase__ = eos_token_id lowercase__ = bos_token_id lowercase__ = pad_token_id lowercase__ = decoder_start_token_id lowercase__ = use_cache lowercase__ = max_position_embeddings lowercase__ = None lowercase__ = decoder_seq_length lowercase__ = 2 lowercase__ = 1 def UpperCamelCase__ (self : str ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = None if self.use_attention_mask: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) lowercase__ = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCamelCase__ (self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str] , ): '''simple docstring''' lowercase__ = True lowercase__ = TrOCRDecoder(config=UpperCamelCase ).to(UpperCamelCase ).eval() lowercase__ = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ = model(UpperCamelCase ) lowercase__ = model(UpperCamelCase , use_cache=UpperCamelCase ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 ) lowercase__ = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids lowercase__ = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 ) lowercase__ = model(UpperCamelCase )['''last_hidden_state'''] lowercase__ = model(UpperCamelCase , past_key_values=UpperCamelCase )['''last_hidden_state'''] # select random slice lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowercase__ = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() lowercase__ = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-3 ) def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.prepare_config_and_inputs() lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class __lowerCAmelCase (lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCAmelCase__ : List[Any] = (TrOCRForCausalLM,) if is_torch_available() else () lowerCAmelCase__ : Optional[Any] = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} lowerCAmelCase__ : Optional[Any] = True lowerCAmelCase__ : List[str] = False def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCamelCase ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' pass def UpperCamelCase__ (self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*UpperCamelCase ) def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return @unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :) def UpperCamelCase__ (self : List[str] ): '''simple docstring''' pass
2
1
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCamelCase : str = Mapping[str, np.ndarray] lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict. lowerCamelCase : Any = 0.0_1 @dataclasses.dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent lowerCAmelCase__ : Optional[Sequence[int]] = None def _SCREAMING_SNAKE_CASE (A ) -> Protein: """simple docstring""" lowercase__ = R'''(\[[A-Z]+\]\n)''' lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0] lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowercase__ = ["N", "CA", "C"] lowercase__ = None lowercase__ = None lowercase__ = None for g in groups: if "[PRIMARY]" == g[0]: lowercase__ = g[1][0].strip() for i in range(len(A ) ): if seq[i] not in residue_constants.restypes: lowercase__ = '''X''' # FIXME: strings are immutable lowercase__ = np.array( [residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowercase__ = [] for axis in range(3 ): tertiary.append(list(map(A , g[1][axis].split() ) ) ) lowercase__ = np.array(A ) lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowercase__ = np.zeros( ( len(A ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , ) def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) lowercase__ = prot.parents lowercase__ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowercase__ = [p for i, p in zip(A , A ) if i == chain_id] if parents is None or len(A ) == 0: lowercase__ = ['''N/A'''] pdb_headers.append(f"PARENT {' '.join(A )}" ) return pdb_headers def _SCREAMING_SNAKE_CASE (A , A ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = pdb_str.split('''\n''' ) lowercase__ = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) lowercase__ = 42 if prot.parents is not None and len(prot.parents ) > 0: lowercase__ = [] if prot.parents_chain_index is not None: lowercase__ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(A ) , [] ) parent_dict[str(A )].append(A ) lowercase__ = max([int(A ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] ) parents_per_chain.append(A ) else: parents_per_chain.append(list(prot.parents ) ) else: lowercase__ = [['''N/A''']] def make_parent_line(A ) -> str: return f"PARENT {' '.join(A )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowercase__ = 0 for i, l in enumerate(A ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(A ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(A ): lowercase__ = parents_per_chain[chain_counter] else: lowercase__ = ['''N/A'''] out_pdb_lines.append(make_parent_line(A ) ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = residue_constants.restypes + ['''X'''] def res_atoa(A ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowercase__ = residue_constants.atom_types lowercase__ = [] lowercase__ = prot.atom_mask lowercase__ = prot.aatype lowercase__ = prot.atom_positions lowercase__ = prot.residue_index.astype(np.intaa ) lowercase__ = prot.b_factors lowercase__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowercase__ = get_pdb_headers(A ) if len(A ) > 0: pdb_lines.extend(A ) lowercase__ = aatype.shape[0] lowercase__ = 1 lowercase__ = 0 lowercase__ = string.ascii_uppercase lowercase__ = None # Add all atom sites. for i in range(A ): lowercase__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowercase__ = '''ATOM''' lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}" lowercase__ = '''''' lowercase__ = '''''' lowercase__ = 1.00 lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works. lowercase__ = '''''' lowercase__ = '''A''' if chain_index is not None: lowercase__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowercase__ = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(A ) atom_index += 1 lowercase__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowercase__ = True lowercase__ = chain_index[i + 1] if should_terminate: # Close the chain. lowercase__ = '''TER''' lowercase__ = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(A ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(A , A ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
2
'''simple docstring''' def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" if not isinstance(A , A ): raise TypeError('''only integers accepted as input''' ) else: lowercase__ = str(abs(A ) ) lowercase__ = [list(A ) for char in range(len(A ) )] for index in range(len(A ) ): num_transpositions[index].pop(A ) return max( int(''''''.join(list(A ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('doctest').testmod()
2
1
'''simple docstring''' import argparse import hashlib # hashlib is only used inside the Test class import struct class __lowerCAmelCase : '''simple docstring''' def __init__(self : Any , UpperCamelCase : Tuple ): '''simple docstring''' lowercase__ = data lowercase__ = [0x6_7_4_5_2_3_0_1, 0xE_F_C_D_A_B_8_9, 0x9_8_B_A_D_C_F_E, 0x1_0_3_2_5_4_7_6, 0xC_3_D_2_E_1_F_0] @staticmethod def UpperCamelCase__ (UpperCamelCase : Dict , UpperCamelCase : Optional[Any] ): '''simple docstring''' return ((n << b) | (n >> (32 - b))) & 0xF_F_F_F_F_F_F_F def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64) lowercase__ = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) ) return padded_data def UpperCamelCase__ (self : Optional[int] ): '''simple docstring''' return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def UpperCamelCase__ (self : Tuple , UpperCamelCase : str ): '''simple docstring''' lowercase__ = list(struct.unpack('''>16L''' , UpperCamelCase ) ) + [0] * 64 for i in range(16 , 80 ): lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = self.padding() lowercase__ = self.split_blocks() for block in self.blocks: lowercase__ = self.expand_block(UpperCamelCase ) lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = self.h for i in range(0 , 80 ): if 0 <= i < 20: lowercase__ = (b & c) | ((~b) & d) lowercase__ = 0x5_A_8_2_7_9_9_9 elif 20 <= i < 40: lowercase__ = b ^ c ^ d lowercase__ = 0x6_E_D_9_E_B_A_1 elif 40 <= i < 60: lowercase__ = (b & c) | (b & d) | (c & d) lowercase__ = 0x8_F_1_B_B_C_D_C elif 60 <= i < 80: lowercase__ = b ^ c ^ d lowercase__ = 0xC_A_6_2_C_1_D_6 lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = ( self.rotate(UpperCamelCase , 5 ) + f + e + k + expanded_block[i] & 0xF_F_F_F_F_F_F_F, a, self.rotate(UpperCamelCase , 30 ), c, d, ) lowercase__ = ( self.h[0] + a & 0xF_F_F_F_F_F_F_F, self.h[1] + b & 0xF_F_F_F_F_F_F_F, self.h[2] + c & 0xF_F_F_F_F_F_F_F, self.h[3] + d & 0xF_F_F_F_F_F_F_F, self.h[4] + e & 0xF_F_F_F_F_F_F_F, ) return ("{:08x}" * 5).format(*self.h ) def _SCREAMING_SNAKE_CASE () -> Optional[Any]: """simple docstring""" lowercase__ = b'''Test String''' assert SHAaHash(A ).final_hash() == hashlib.shaa(A ).hexdigest() # noqa: S324 def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = argparse.ArgumentParser(description='''Process some strings or files''' ) parser.add_argument( '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , ) parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' ) lowercase__ = parser.parse_args() lowercase__ = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , '''rb''' ) as f: lowercase__ = f.read() else: lowercase__ = bytes(A , '''utf-8''' ) print(SHAaHash(A ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
2
'''simple docstring''' import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCamelCase : str = Mapping[str, np.ndarray] lowerCamelCase : List[Any] = Mapping[str, Any] # Is a nested dict. lowerCamelCase : Any = 0.0_1 @dataclasses.dataclass(frozen=lowercase_ ) class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. lowerCAmelCase__ : np.ndarray # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. lowerCAmelCase__ : np.ndarray # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. lowerCAmelCase__ : np.ndarray # [num_res, num_atom_type] # Chain indices for multi-chain predictions lowerCAmelCase__ : Optional[np.ndarray] = None # Optional remark about the protein. Included as a comment in output PDB # files lowerCAmelCase__ : Optional[str] = None # Templates used to generate this protein (prediction-only) lowerCAmelCase__ : Optional[Sequence[str]] = None # Chain corresponding to each parent lowerCAmelCase__ : Optional[Sequence[int]] = None def _SCREAMING_SNAKE_CASE (A ) -> Protein: """simple docstring""" lowercase__ = R'''(\[[A-Z]+\]\n)''' lowercase__ = [tag.strip() for tag in re.split(A , A ) if len(A ) > 0] lowercase__ = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowercase__ = ["N", "CA", "C"] lowercase__ = None lowercase__ = None lowercase__ = None for g in groups: if "[PRIMARY]" == g[0]: lowercase__ = g[1][0].strip() for i in range(len(A ) ): if seq[i] not in residue_constants.restypes: lowercase__ = '''X''' # FIXME: strings are immutable lowercase__ = np.array( [residue_constants.restype_order.get(A , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowercase__ = [] for axis in range(3 ): tertiary.append(list(map(A , g[1][axis].split() ) ) ) lowercase__ = np.array(A ) lowercase__ = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowercase__ = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowercase__ = np.zeros( ( len(A ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(A ): lowercase__ = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=A , atom_mask=A , aatype=A , residue_index=np.arange(len(A ) ) , b_factors=A , ) def _SCREAMING_SNAKE_CASE (A , A = 0 ) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = prot.remark if remark is not None: pdb_headers.append(f"REMARK {remark}" ) lowercase__ = prot.parents lowercase__ = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowercase__ = [p for i, p in zip(A , A ) if i == chain_id] if parents is None or len(A ) == 0: lowercase__ = ['''N/A'''] pdb_headers.append(f"PARENT {' '.join(A )}" ) return pdb_headers def _SCREAMING_SNAKE_CASE (A , A ) -> str: """simple docstring""" lowercase__ = [] lowercase__ = pdb_str.split('''\n''' ) lowercase__ = prot.remark if remark is not None: out_pdb_lines.append(f"REMARK {remark}" ) lowercase__ = 42 if prot.parents is not None and len(prot.parents ) > 0: lowercase__ = [] if prot.parents_chain_index is not None: lowercase__ = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(A ) , [] ) parent_dict[str(A )].append(A ) lowercase__ = max([int(A ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowercase__ = parent_dict.get(str(A ) , ['''N/A'''] ) parents_per_chain.append(A ) else: parents_per_chain.append(list(prot.parents ) ) else: lowercase__ = [['''N/A''']] def make_parent_line(A ) -> str: return f"PARENT {' '.join(A )}" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowercase__ = 0 for i, l in enumerate(A ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(A ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(A ): lowercase__ = parents_per_chain[chain_counter] else: lowercase__ = ['''N/A'''] out_pdb_lines.append(make_parent_line(A ) ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> str: """simple docstring""" lowercase__ = residue_constants.restypes + ['''X'''] def res_atoa(A ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowercase__ = residue_constants.atom_types lowercase__ = [] lowercase__ = prot.atom_mask lowercase__ = prot.aatype lowercase__ = prot.atom_positions lowercase__ = prot.residue_index.astype(np.intaa ) lowercase__ = prot.b_factors lowercase__ = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowercase__ = get_pdb_headers(A ) if len(A ) > 0: pdb_lines.extend(A ) lowercase__ = aatype.shape[0] lowercase__ = 1 lowercase__ = 0 lowercase__ = string.ascii_uppercase lowercase__ = None # Add all atom sites. for i in range(A ): lowercase__ = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(A , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowercase__ = '''ATOM''' lowercase__ = atom_name if len(A ) == 4 else f" {atom_name}" lowercase__ = '''''' lowercase__ = '''''' lowercase__ = 1.00 lowercase__ = atom_name[0] # Protein supports only C, N, O, S, this works. lowercase__ = '''''' lowercase__ = '''A''' if chain_index is not None: lowercase__ = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowercase__ = ( f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}" f"{res_name_a:>3} {chain_tag:>1}" f"{residue_index[i]:>4}{insertion_code:>1} " f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}" f"{occupancy:>6.2f}{b_factor:>6.2f} " f"{element:>2}{charge:>2}" ) pdb_lines.append(A ) atom_index += 1 lowercase__ = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowercase__ = True lowercase__ = chain_index[i + 1] if should_terminate: # Close the chain. lowercase__ = '''TER''' lowercase__ = ( f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}" ) pdb_lines.append(A ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(A , A ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(A ) def _SCREAMING_SNAKE_CASE (A ) -> np.ndarray: """simple docstring""" return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def _SCREAMING_SNAKE_CASE (A , A , A = None , A = None , A = None , A = None , A = None , ) -> Protein: """simple docstring""" return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=A , remark=A , parents=A , parents_chain_index=A , )
2
1
'''simple docstring''' from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = tf.convert_to_tensor( [ [ 8.2_22_09_91, # 3rd highest value; idx. 0 -0.5_62_00_44, 5.23_22_97_52, 4.0_38_63_93, -6.8_79_83_78, -0.54_78_58_02, -3.2_01_21_53, 2.92_77_71_76, 1.88_17_19_53, 7.35_34_12_76, # 5th highest value; idx. 9 8.43_20_78_33, # 2nd highest value; idx. 10 -9.85_71_18_36, -5.96_20_92_36, -1.13_03_91_61, -7.1_11_52_94, -0.8_36_96_33, -5.3_18_64_08, 7.06_42_74_07, 0.81_36_93_44, -0.82_02_38_17, -5.9_17_97_96, 0.58_81_34_43, -6.99_77_84_38, 4.71_55_11_89, -0.18_77_16_37, 7.44_02_07_59, # 4th highest value; idx. 25 9.38_45_09_87, # 1st highest value; idx. 26 2.12_66_29_41, -9.32_56_20_38, 2.35_65_25_22, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_42_55_18, 4.53_13_92_38, -5.57_51_04_64, -6.28_03_06_99, -7.19_52_95_03, -4.02_12_25_51, 1.39_33_70_37, -6.06_70_70_57, 1.59_48_05_17, -9.64_31_19, 0.03_90_77_99, 0.67_23_17_62, -8.88_20_67_26, 6.27_11_59_22, # 4th highest value; idx. 13 2.28_52_07_23, 4.82_76_75_06, 4.30_42_13_68, 8.8_27_53_13, # 2nd highest value; idx. 17 5.44_02_99_58, # 5th highest value; idx. 18 -4.4_73_57_94, 7.38_57_95_36, # 3rd highest value; idx. 20 -2.91_05_16_63, 2.61_94_60_77, -2.5_67_47_62, -9.48_95_93_02, -4.02_92_26_45, -1.35_41_69_18, 9.67_70_23_23, # 1st highest value; idx. 27 -5.89_47_85_53, 1.85_37_04_67, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) lowercase__ = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above lowercase__ = tf.convert_to_tensor( [8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above lowercase__ = tf_top_k_top_p_filtering(UpperCamelCase , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 ) lowercase__ = output[output != -float('''inf''' )] lowercase__ = tf.cast( tf.where(tf.not_equal(UpperCamelCase , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , rtol=1E-12 ) tf.debugging.assert_equal(UpperCamelCase , UpperCamelCase ) @require_tf class __lowerCAmelCase (unittest.TestCase , lowercase_ ): '''simple docstring''' if is_tf_available(): lowerCAmelCase__ : Optional[int] = { """AutoModelForCausalLM""": TFAutoModelForCausalLM, """AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq, """AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM, """AutoModelForVision2Seq""": TFAutoModelForVisionaSeq, """LogitsProcessorList""": TFLogitsProcessorList, """MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor, """create_tensor_fn""": tf.convert_to_tensor, """floats_tensor""": floats_tensor, """return_tensors""": """tf""", } @slow def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = 2 lowercase__ = 2 class __lowerCAmelCase (tf.Module ): '''simple docstring''' def __init__(self : str , UpperCamelCase : List[Any] ): '''simple docstring''' super(UpperCamelCase , self ).__init__() lowercase__ = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCamelCase , ) def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = self.model.generate( input_ids=UpperCamelCase , attention_mask=UpperCamelCase , max_new_tokens=UpperCamelCase , return_dict_in_generate=UpperCamelCase , ) return {"sequences": outputs["sequences"]} lowercase__ = [[2, 0], [102, 103]] lowercase__ = [[1, 0], [1, 1]] lowercase__ = DummyModel(model=UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} ) lowercase__ = tf.saved_model.load(UpperCamelCase ).signatures['''serving_default'''] for batch_size in range(1 , len(UpperCamelCase ) + 1 ): lowercase__ = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } lowercase__ = serving_func(**UpperCamelCase )['''sequences'''] lowercase__ = test_model.generate(**UpperCamelCase , max_new_tokens=UpperCamelCase ) tf.debugging.assert_equal(UpperCamelCase , UpperCamelCase ) @slow def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = 1 lowercase__ = 2 class __lowerCAmelCase (tf.Module ): '''simple docstring''' def __init__(self : Tuple , UpperCamelCase : Tuple ): '''simple docstring''' super(UpperCamelCase , self ).__init__() lowercase__ = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=UpperCamelCase , ) def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = self.model.generate( input_ids=UpperCamelCase , attention_mask=UpperCamelCase , max_new_tokens=UpperCamelCase , return_dict_in_generate=UpperCamelCase , ) return {"sequences": outputs["sequences"]} lowercase__ = [[2], [102, 103]] lowercase__ = [[1], [1, 1]] lowercase__ = DummyModel(model=UpperCamelCase ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase , UpperCamelCase , signatures={'''serving_default''': dummy_model.serving} ) lowercase__ = tf.saved_model.load(UpperCamelCase ).signatures['''serving_default'''] for input_row in range(len(UpperCamelCase ) ): lowercase__ = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } lowercase__ = serving_func(**UpperCamelCase )['''sequences'''] lowercase__ = test_model.generate(**UpperCamelCase , max_new_tokens=UpperCamelCase ) tf.debugging.assert_equal(UpperCamelCase , UpperCamelCase ) @slow @require_tensorflow_text def UpperCamelCase__ (self : Dict ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=UpperCamelCase ) class __lowerCAmelCase (tf.keras.layers.Layer ): '''simple docstring''' def __init__(self : Tuple ): '''simple docstring''' super().__init__() lowercase__ = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCamelCase , '''spiece.model''' ) , '''rb''' ).read() ) lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def UpperCamelCase__ (self : str , UpperCamelCase : Tuple , *UpperCamelCase : str , **UpperCamelCase : Tuple ): '''simple docstring''' lowercase__ = self.tokenizer.tokenize(UpperCamelCase ) lowercase__ ,lowercase__ = text.pad_model_inputs( UpperCamelCase , max_seq_length=64 , pad_value=self.model.config.pad_token_id ) lowercase__ = self.model.generate(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) return self.tokenizer.detokenize(UpperCamelCase ) lowercase__ = CompleteSentenceTransformer() lowercase__ = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) lowercase__ = complete_model(UpperCamelCase ) lowercase__ = tf.keras.Model(UpperCamelCase , UpperCamelCase ) keras_model.save(UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 10, '''temperature''': 0.7, } lowercase__ = 14 lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = '''Hello, my dog is cute and''' lowercase__ = tokenizer(UpperCamelCase , return_tensors='''tf''' ) lowercase__ = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) lowercase__ = 638 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) lowercase__ = model.generate(**UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) lowercase__ = [638, 198] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) lowercase__ = model.generate(**UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) lowercase__ = '''Hugging Face is a technology company based in New York and Paris.''' lowercase__ = bart_tokenizer(UpperCamelCase , return_tensors='''tf''' ).input_ids lowercase__ = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) lowercase__ = bart_model.generate(UpperCamelCase ).numpy() class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def UpperCamelCase__ (self : int , UpperCamelCase : Tuple , UpperCamelCase : int=None , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' return super().call(UpperCamelCase , **UpperCamelCase ) lowercase__ = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) lowercase__ = bart_model.generate(UpperCamelCase , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(UpperCamelCase , UpperCamelCase ) ) class __lowerCAmelCase (bart_model.model.encoder.__class__ ): '''simple docstring''' def UpperCamelCase__ (self : Any , UpperCamelCase : int , **UpperCamelCase : str ): '''simple docstring''' return super().call(UpperCamelCase , **UpperCamelCase ) lowercase__ = FakeEncoder(bart_model.config , bart_model.model.shared ) lowercase__ = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) lowercase__ = bart_model.generate(UpperCamelCase ).numpy() with self.assertRaises(UpperCamelCase ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCamelCase , foo='''bar''' )
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A , A ) -> list[list[int]]: """simple docstring""" lowercase__ = [] create_all_state(1 , A , A , [] , A ) return result def _SCREAMING_SNAKE_CASE (A , A , A , A , A , ) -> None: """simple docstring""" if level == 0: total_list.append(current_list[:] ) return for i in range(A , total_number - level + 2 ): current_list.append(A ) create_all_state(i + 1 , A , level - 1 , A , A ) current_list.pop() def _SCREAMING_SNAKE_CASE (A ) -> None: """simple docstring""" for i in total_list: print(*A ) if __name__ == "__main__": lowerCamelCase : Tuple = 4 lowerCamelCase : Union[str, Any] = 2 lowerCamelCase : Dict = generate_all_combinations(n, k) print_all_state(total_list)
2
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase : Union[str, Any] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Any = ['FNetTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = ['FNetTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Union[str, Any] = [ 'FNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FNetForMaskedLM', 'FNetForMultipleChoice', 'FNetForNextSentencePrediction', 'FNetForPreTraining', 'FNetForQuestionAnswering', 'FNetForSequenceClassification', 'FNetForTokenClassification', 'FNetLayer', 'FNetModel', 'FNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
'''simple docstring''' import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand lowerCamelCase : Optional[Any] = ( '4S 3H 2C 7S 5H', '9D 8H 2C 6S 7H', '2D 6D 9D TH 7D', 'TC 8C 2S JH 6C', 'JH 8S TH AH QH', 'TS KS 5S 9S AC', 'KD 6S 9D TH AD', 'KS 8D 4D 9S 4S', # pair '8C 4S KH JS 4D', # pair 'QH 8H KD JH 8S', # pair 'KC 4H KS 2H 8D', # pair 'KD 4S KC 3H 8S', # pair 'AH 8S AS KC JH', # pair '3H 4C 4H 3S 2H', # 2 pairs '5S 5D 2C KH KH', # 2 pairs '3C KH 5D 5S KH', # 2 pairs 'AS 3C KH AD KH', # 2 pairs '7C 7S 3S 7H 5S', # 3 of a kind '7C 7S KH 2H 7H', # 3 of a kind 'AC KH QH AH AS', # 3 of a kind '2H 4D 3C AS 5S', # straight (low ace) '3C 5C 4C 2C 6H', # straight '6S 8S 7S 5H 9H', # straight 'JS QS 9H TS KH', # straight 'QC KH TS JS AH', # straight (high ace) '8C 9C 5C 3C TC', # flush '3S 8S 9S 5S KS', # flush '4C 5C 9C 8C KC', # flush 'JH 8H AH KH QH', # flush '3D 2H 3H 2C 2D', # full house '2H 2C 3S 3H 3D', # full house 'KH KC 3S 3H 3D', # full house 'JC 6H JS JD JH', # 4 of a kind 'JC 7H JS JD JH', # 4 of a kind 'JC KH JS JD JH', # 4 of a kind '2S AS 4S 5S 3S', # straight flush (low ace) '2D 6D 3D 4D 5D', # straight flush '5C 6C 3C 7C 4C', # straight flush 'JH 9H TH KH QH', # straight flush 'JH AH TH KH QH', # royal flush (high ace straight flush) ) lowerCamelCase : Tuple = ( ('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'), ('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'), ('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'), ('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'), ('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'), ('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'), ('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'), ('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'), ('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'), ('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'), ('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'), ('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'), ('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'), ('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'), ('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'), ('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'), ('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'), ('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'), ('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'), ('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'), ('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'), ('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'), ('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'), ('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'), ('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'), ('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'), ('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'), ('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'), ('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'), ('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'), ('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'), ('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'), ) lowerCamelCase : Dict = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', True), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', False), ('AS 3S 4S 8S 2S', True), ) lowerCamelCase : Any = ( ('2H 3H 4H 5H 6H', True), ('AS AH 2H AD AC', False), ('2H 3H 5H 6H 7H', False), ('KS AS TS QS JS', True), ('8H 9H QS JS TH', True), ) lowerCamelCase : Tuple = ( ('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]), ('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]), ('JH QD KC AS TS', False, [14, 13, 12, 11, 10]), ('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]), ) lowerCamelCase : Optional[int] = ( ('JH AH TH KH QH', 0), ('JH 9H TH KH QH', 0), ('JC KH JS JD JH', 7), ('KH KC 3S 3H 3D', 6), ('8C 9C 5C 3C TC', 0), ('JS QS 9H TS KH', 0), ('7C 7S KH 2H 7H', 3), ('3C KH 5D 5S KH', 2), ('QH 8H KD JH 8S', 1), ('2D 6D 9D TH 7D', 0), ) lowerCamelCase : Dict = ( ('JH AH TH KH QH', 23), ('JH 9H TH KH QH', 22), ('JC KH JS JD JH', 21), ('KH KC 3S 3H 3D', 20), ('8C 9C 5C 3C TC', 19), ('JS QS 9H TS KH', 18), ('7C 7S KH 2H 7H', 17), ('3C KH 5D 5S KH', 16), ('QH 8H KD JH 8S', 15), ('2D 6D 9D TH 7D', 14), ) def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ ,lowercase__ = randrange(len(A ) ), randrange(len(A ) ) lowercase__ = ['''Loss''', '''Tie''', '''Win'''][(play >= oppo) + (play > oppo)] lowercase__ ,lowercase__ = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def _SCREAMING_SNAKE_CASE (A = 100 ) -> str: """simple docstring""" return (generate_random_hand() for _ in range(A )) @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> List[str]: """simple docstring""" assert PokerHand(A )._is_flush() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A )._is_straight() == expected @pytest.mark.parametrize('''hand, expected, card_values''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = PokerHand(A ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Tuple: """simple docstring""" assert PokerHand(A )._is_same_kind() == expected @pytest.mark.parametrize('''hand, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A )._hand_type == expected @pytest.mark.parametrize('''hand, other, expected''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected @pytest.mark.parametrize('''hand, other, expected''' , generate_random_hands() ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[Any]: """simple docstring""" assert PokerHand(A ).compare_with(PokerHand(A ) ) == expected def _SCREAMING_SNAKE_CASE () -> Tuple: """simple docstring""" lowercase__ = [PokerHand(A ) for hand in SORTED_HANDS] lowercase__ = poker_hands.copy() shuffle(A ) lowercase__ = chain(sorted(A ) ) for index, hand in enumerate(A ): assert hand == poker_hands[index] def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" lowercase__ = [PokerHand('''2D AC 3H 4H 5S''' ), PokerHand('''2S 3H 4H 5S 6C''' )] pokerhands.sort(reverse=A ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def _SCREAMING_SNAKE_CASE () -> int: """simple docstring""" lowercase__ = PokerHand('''2C 4S AS 3D 5C''' ) lowercase__ = True lowercase__ = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = 0 lowercase__ = os.path.abspath(os.path.dirname(A ) ) lowercase__ = os.path.join(A , '''poker_hands.txt''' ) with open(A ) as file_hand: for line in file_hand: lowercase__ = line[:14].strip() lowercase__ = line[15:].strip() lowercase__ ,lowercase__ = PokerHand(A ), PokerHand(A ) lowercase__ = player.compare_with(A ) if output == "Win": answer += 1 assert answer == 376
2
1
'''simple docstring''' # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers lowerCamelCase : List[Any] = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
2
'''simple docstring''' import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowerCamelCase : List[str] = argparse.ArgumentParser( description=( 'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned' ' Distillation' ) ) parser.add_argument('--model_type', default='bert', choices=['bert']) parser.add_argument('--model_name', default='bert-base-uncased', type=str) parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str) parser.add_argument('--vocab_transform', action='store_true') lowerCamelCase : str = parser.parse_args() if args.model_type == "bert": lowerCamelCase : List[Any] = BertForMaskedLM.from_pretrained(args.model_name) lowerCamelCase : Any = 'bert' else: raise ValueError('args.model_type should be "bert".') lowerCamelCase : int = model.state_dict() lowerCamelCase : int = {} for w in ["word_embeddings", "position_embeddings"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.{w}.weight"""] for w in ["weight", "bias"]: lowerCamelCase : Any = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""] lowerCamelCase : Tuple = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}""" ] lowerCamelCase : List[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}""" ] lowerCamelCase : Tuple = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}""" ] lowerCamelCase : Optional[int] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}""" ] lowerCamelCase : Optional[Any] = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}""" ] lowerCamelCase : Dict = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}""" ] lowerCamelCase : Any = state_dict[ f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}""" ] std_idx += 1 lowerCamelCase : Optional[int] = state_dict['cls.predictions.decoder.weight'] lowerCamelCase : str = state_dict['cls.predictions.bias'] if args.vocab_transform: for w in ["weight", "bias"]: lowerCamelCase : str = state_dict[f"""cls.predictions.transform.dense.{w}"""] lowerCamelCase : Any = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""] print(f"""N layers selected for distillation: {std_idx}""") print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""") print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""") torch.save(compressed_sd, args.dump_checkpoint)
2
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __lowerCAmelCase : '''simple docstring''' lowerCAmelCase__ : Optional[int] = BlenderbotSmallConfig lowerCAmelCase__ : List[Any] = {} lowerCAmelCase__ : List[Any] = """gelu""" def __init__(self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : int=7 , UpperCamelCase : int=True , UpperCamelCase : str=False , UpperCamelCase : str=99 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Any=2 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : List[Any]=37 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : List[str]=20 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : int=1 , UpperCamelCase : Tuple=0 , ): '''simple docstring''' lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = eos_token_id lowercase__ = pad_token_id lowercase__ = bos_token_id def UpperCamelCase__ (self : Dict ): '''simple docstring''' lowercase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ = prepare_blenderbot_small_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def UpperCamelCase__ (self : Tuple , UpperCamelCase : Any , UpperCamelCase : Dict ): '''simple docstring''' lowercase__ = TFBlenderbotSmallModel(config=UpperCamelCase ).get_decoder() lowercase__ = inputs_dict['''input_ids'''] lowercase__ = input_ids[:1, :] lowercase__ = inputs_dict['''attention_mask'''][:1, :] lowercase__ = inputs_dict['''head_mask'''] lowercase__ = 1 # first forward pass lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , head_mask=UpperCamelCase , use_cache=UpperCamelCase ) lowercase__ ,lowercase__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__ = tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , past_key_values=UpperCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__ = output_from_no_past[:, -3:, random_slice_idx] lowercase__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase , UpperCamelCase , rtol=1E-3 ) def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None , A=None , A=None , A=None , ) -> List[Any]: """simple docstring""" if attention_mask is None: lowercase__ = tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowercase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) lowerCAmelCase__ : Optional[int] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ : Tuple = ( { """conversational""": TFBlenderbotSmallForConditionalGeneration, """feature-extraction""": TFBlenderbotSmallModel, """summarization""": TFBlenderbotSmallForConditionalGeneration, """text2text-generation""": TFBlenderbotSmallForConditionalGeneration, """translation""": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ : List[str] = True lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : Optional[Any] = False def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = TFBlenderbotSmallModelTester(self ) lowercase__ = ConfigTester(self , config_class=UpperCamelCase ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase ) @require_tokenizers @require_tf class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Union[str, Any] = [ """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """ """ i'm going to throw up.\nand why is that?""" ] lowerCAmelCase__ : Dict = """facebook/blenderbot_small-90M""" @cached_property def UpperCamelCase__ (self : Dict ): '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def UpperCamelCase__ (self : Any ): '''simple docstring''' lowercase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' lowercase__ = self.tokenizer(self.src_text , return_tensors='''tf''' ) lowercase__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase , ) lowercase__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
2
'''simple docstring''' from ....utils import logging lowerCamelCase : Optional[Any] = logging.get_logger(__name__) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=2048 ): '''simple docstring''' lowercase__ = config.__dict__ lowercase__ = modal_hidden_size if num_labels: lowercase__ = num_labels
2
1
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging lowerCamelCase : List[str] = logging.get_logger(__name__) lowerCamelCase : List[str] = { 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = """codegen""" lowerCAmelCase__ : Union[str, Any] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__(self : Any , UpperCamelCase : List[Any]=50400 , UpperCamelCase : Optional[Any]=2048 , UpperCamelCase : List[Any]=2048 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Union[str, Any]=28 , UpperCamelCase : Optional[Any]=16 , UpperCamelCase : Dict=64 , UpperCamelCase : Tuple=None , UpperCamelCase : Optional[int]="gelu_new" , UpperCamelCase : str=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : int=1E-5 , UpperCamelCase : str=0.02 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Optional[Any]=50256 , UpperCamelCase : Dict=50256 , UpperCamelCase : int=False , **UpperCamelCase : Optional[int] , ): '''simple docstring''' lowercase__ = vocab_size lowercase__ = n_ctx lowercase__ = n_positions lowercase__ = n_embd lowercase__ = n_layer lowercase__ = n_head lowercase__ = n_inner lowercase__ = rotary_dim lowercase__ = activation_function lowercase__ = resid_pdrop lowercase__ = embd_pdrop lowercase__ = attn_pdrop lowercase__ = layer_norm_epsilon lowercase__ = initializer_range lowercase__ = use_cache lowercase__ = bos_token_id lowercase__ = eos_token_id super().__init__( bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , tie_word_embeddings=UpperCamelCase , **UpperCamelCase ) class __lowerCAmelCase (lowercase_ ): '''simple docstring''' def __init__(self : Union[str, Any] , UpperCamelCase : PretrainedConfig , UpperCamelCase : str = "default" , UpperCamelCase : List[PatchingSpec] = None , UpperCamelCase : bool = False , ): '''simple docstring''' super().__init__(UpperCamelCase , task=UpperCamelCase , patching_specs=UpperCamelCase , use_past=UpperCamelCase ) if not getattr(self._config , '''pad_token_id''' , UpperCamelCase ): # TODO: how to do that better? lowercase__ = 0 @property def UpperCamelCase__ (self : List[str] ): '''simple docstring''' lowercase__ = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase , direction='''inputs''' ) lowercase__ = {0: '''batch''', 1: '''past_sequence + sequence'''} else: lowercase__ = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def UpperCamelCase__ (self : Any ): '''simple docstring''' return self._config.n_layer @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return self._config.n_head def UpperCamelCase__ (self : List[Any] , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' lowercase__ = super(UpperCamelCase , self ).generate_dummy_inputs( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) # We need to order the input in the way they appears in the forward() lowercase__ = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowercase__ ,lowercase__ = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowercase__ = seqlen + 2 lowercase__ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase__ = [ (torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(self.num_layers ) ] lowercase__ = common_inputs['''attention_mask'''] if self.use_past: lowercase__ = ordered_inputs['''attention_mask'''].dtype lowercase__ = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 ) return ordered_inputs @property def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' return 13
2
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Tuple = """cvt""" def __init__(self : int , UpperCamelCase : List[Any]=3 , UpperCamelCase : int=[7, 3, 3] , UpperCamelCase : str=[4, 2, 2] , UpperCamelCase : Dict=[2, 1, 1] , UpperCamelCase : Dict=[64, 192, 384] , UpperCamelCase : Dict=[1, 3, 6] , UpperCamelCase : Dict=[1, 2, 10] , UpperCamelCase : Any=[4.0, 4.0, 4.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : Optional[int]=[0.0, 0.0, 0.0] , UpperCamelCase : int=[0.0, 0.0, 0.1] , UpperCamelCase : Any=[True, True, True] , UpperCamelCase : int=[False, False, True] , UpperCamelCase : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase : Optional[int]=[3, 3, 3] , UpperCamelCase : Tuple=[1, 1, 1] , UpperCamelCase : Any=[2, 2, 2] , UpperCamelCase : Dict=[1, 1, 1] , UpperCamelCase : List[str]=[1, 1, 1] , UpperCamelCase : str=0.02 , UpperCamelCase : int=1E-12 , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) lowercase__ = num_channels lowercase__ = patch_sizes lowercase__ = patch_stride lowercase__ = patch_padding lowercase__ = embed_dim lowercase__ = num_heads lowercase__ = depth lowercase__ = mlp_ratio lowercase__ = attention_drop_rate lowercase__ = drop_rate lowercase__ = drop_path_rate lowercase__ = qkv_bias lowercase__ = cls_token lowercase__ = qkv_projection_method lowercase__ = kernel_qkv lowercase__ = padding_kv lowercase__ = stride_kv lowercase__ = padding_q lowercase__ = stride_q lowercase__ = initializer_range lowercase__ = layer_norm_eps
2
1
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Any = ["""image_processor""", """tokenizer"""] lowerCAmelCase__ : Union[str, Any] = """BlipImageProcessor""" lowerCAmelCase__ : Optional[int] = """AutoTokenizer""" def __init__(self : List[Any] , UpperCamelCase : Any , UpperCamelCase : int ): '''simple docstring''' lowercase__ = False super().__init__(UpperCamelCase , UpperCamelCase ) lowercase__ = self.image_processor def __call__(self : int , UpperCamelCase : ImageInput = None , UpperCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[bool, str, PaddingStrategy] = False , UpperCamelCase : Union[bool, str, TruncationStrategy] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : int = 0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = False , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, TensorType]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowercase__ = self.tokenizer lowercase__ = self.tokenizer( text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) return text_encoding # add pixel_values lowercase__ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase ) if text is not None: lowercase__ = self.tokenizer( text=UpperCamelCase , add_special_tokens=UpperCamelCase , padding=UpperCamelCase , truncation=UpperCamelCase , max_length=UpperCamelCase , stride=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , return_overflowing_tokens=UpperCamelCase , return_special_tokens_mask=UpperCamelCase , return_offsets_mapping=UpperCamelCase , return_token_type_ids=UpperCamelCase , return_length=UpperCamelCase , verbose=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase , ) else: lowercase__ = None if text_encoding is not None: encoding_image_processor.update(UpperCamelCase ) return encoding_image_processor def UpperCamelCase__ (self : Optional[Any] , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def UpperCamelCase__ (self : str , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.tokenizer.model_input_names lowercase__ = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
2
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowerCamelCase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) lowerCamelCase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowerCamelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowerCamelCase : str = np.expand_dims(test_image, axis=0) lowerCamelCase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowerCamelCase : Any = 'Normal' if result[0][0] == 1: lowerCamelCase : Any = 'Abnormality detected'
2
1
'''simple docstring''' import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def _SCREAMING_SNAKE_CASE (A ) -> Any: """simple docstring""" assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def _SCREAMING_SNAKE_CASE () -> List[Any]: """simple docstring""" assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def _SCREAMING_SNAKE_CASE () -> Optional[Any]: """simple docstring""" lowercase__ = '''mock-s3-bucket''' lowercase__ = f"s3://{mock_bucket}" lowercase__ = extract_path_from_uri(A ) assert dataset_path.startswith('''s3://''' ) is False lowercase__ = '''./local/path''' lowercase__ = extract_path_from_uri(A ) assert dataset_path == new_dataset_path def _SCREAMING_SNAKE_CASE (A ) -> Dict: """simple docstring""" lowercase__ = is_remote_filesystem(A ) assert is_remote is True lowercase__ = fsspec.filesystem('''file''' ) lowercase__ = is_remote_filesystem(A ) assert is_remote is False @pytest.mark.parametrize('''compression_fs_class''' , A ) def _SCREAMING_SNAKE_CASE (A , A , A , A , A , A , A ) -> List[str]: """simple docstring""" lowercase__ = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file} lowercase__ = input_paths[compression_fs_class.protocol] if input_path is None: lowercase__ = f"for '{compression_fs_class.protocol}' compression protocol, " if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(A ) lowercase__ = fsspec.filesystem(compression_fs_class.protocol , fo=A ) assert isinstance(A , A ) lowercase__ = os.path.basename(A ) lowercase__ = expected_filename[: expected_filename.rindex('''.''' )] assert fs.glob('''*''' ) == [expected_filename] with fs.open(A , '''r''' , encoding='''utf-8''' ) as f, open(A , encoding='''utf-8''' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Optional[int]: """simple docstring""" lowercase__ = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path} lowercase__ = compressed_file_paths[protocol] lowercase__ = '''dataset.jsonl''' lowercase__ = f"{protocol}://{member_file_path}::{compressed_file_path}" lowercase__ ,*lowercase__ = fsspec.get_fs_token_paths(A ) assert fs.isfile(A ) assert not fs.isfile('''non_existing_''' + member_file_path ) @pytest.mark.integration def _SCREAMING_SNAKE_CASE (A , A , A , A ) -> Optional[Any]: """simple docstring""" lowercase__ = hf_api.dataset_info(A , token=A ) lowercase__ = HfFileSystem(repo_info=A , token=A ) assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"] assert hffs.isdir('''data''' ) assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' ) with open(A ) as f: assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read() def _SCREAMING_SNAKE_CASE () -> Optional[int]: """simple docstring""" lowercase__ = '''bz2''' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(A , A , clobber=A ) with pytest.warns(A ) as warning_info: importlib.reload(datasets.filesystems ) assert len(A ) == 1 assert ( str(warning_info[0].message ) == f"A filesystem protocol was already set for {protocol} and will be overwritten." )
2
'''simple docstring''' class __lowerCAmelCase : # Public class to implement a graph '''simple docstring''' def __init__(self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = row lowercase__ = col lowercase__ = graph def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCamelCase__ (self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1] lowercase__ = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ) def UpperCamelCase__ (self : Dict ): # And finally, count all islands. '''simple docstring''' lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )] lowercase__ = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += 1 return count
2
1
'''simple docstring''' def _SCREAMING_SNAKE_CASE () -> str: """simple docstring""" lowercase__ = [] lowercase__ = 1 while len(A ) < 1E6: constant.append(str(A ) ) i += 1 lowercase__ = ''''''.join(A ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[99] ) * int(constant[999] ) * int(constant[9_999] ) * int(constant[99_999] ) * int(constant[999_999] ) ) if __name__ == "__main__": print(solution())
2
'''simple docstring''' import unittest from transformers import DonutProcessor lowerCamelCase : Tuple = 'naver-clova-ix/donut-base' class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = DonutProcessor.from_pretrained(UpperCamelCase ) def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } lowercase__ = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) lowercase__ = self.processor.tokenajson(UpperCamelCase ) self.assertDictEqual(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) lowerCamelCase : Any = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu') ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation='relu')) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation='relu')) classifier.add(layers.Dense(units=1, activation='sigmoid')) # Compiling the CNN classifier.compile( optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') lowerCamelCase : Optional[Any] = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) lowerCamelCase : Any = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) lowerCamelCase : List[Any] = train_datagen.flow_from_directory( 'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) lowerCamelCase : List[str] = test_datagen.flow_from_directory( 'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary' ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save('cnn.h5') # Part 3 - Making new predictions lowerCamelCase : List[str] = tf.keras.preprocessing.image.load_img( 'dataset/single_prediction/image.png', target_size=(64, 64) ) lowerCamelCase : Optional[int] = tf.keras.preprocessing.image.img_to_array(test_image) lowerCamelCase : str = np.expand_dims(test_image, axis=0) lowerCamelCase : List[str] = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: lowerCamelCase : Any = 'Normal' if result[0][0] == 1: lowerCamelCase : Any = 'Abnormality detected'
2
'''simple docstring''' from __future__ import annotations def _SCREAMING_SNAKE_CASE (A ) -> bool: """simple docstring""" return len(set(A ) ) == len(A ) if __name__ == "__main__": import doctest doctest.testmod()
2
1
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _SCREAMING_SNAKE_CASE (A , A ) -> str: """simple docstring""" assert isinstance(A , A ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Tuple: """simple docstring""" lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ = JsonDatasetReader(A , cache_dir=A , keep_in_memory=A ).read() _check_json_dataset(A , A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Any: """simple docstring""" lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = JsonDatasetReader(A , features=A , cache_dir=A ).read() _check_json_dataset(A , A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> List[str]: """simple docstring""" lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = JsonDatasetReader(A , features=A , cache_dir=A ).read() assert isinstance(A , A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def _SCREAMING_SNAKE_CASE (A , A ) -> Union[str, Any]: """simple docstring""" lowercase__ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} lowercase__ = features.copy() lowercase__ = ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = tmp_path / '''cache''' lowercase__ = JsonDatasetReader(A , features=A , cache_dir=A ).read() assert isinstance(A , A ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase__ = JsonDatasetReader(A , cache_dir=A , split=A ).read() _check_json_dataset(A , A ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Tuple: """simple docstring""" if issubclass(A , A ): lowercase__ = jsonl_path elif issubclass(A , A ): lowercase__ = [jsonl_path] lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase__ = JsonDatasetReader(A , cache_dir=A ).read() _check_json_dataset(A , A ) def _SCREAMING_SNAKE_CASE (A , A , A=("train",) ) -> Tuple: """simple docstring""" assert isinstance(A , A ) for split in splits: lowercase__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> str: """simple docstring""" lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowercase__ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=A , keep_in_memory=A ).read() _check_json_datasetdict(A , A ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> List[Any]: """simple docstring""" lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase__ = features.copy() if features else default_expected_features lowercase__ = ( Features({feature: Value(A ) for feature, dtype in features.items()} ) if features is not None else None ) lowercase__ = JsonDatasetReader({'''train''': jsonl_path} , features=A , cache_dir=A ).read() _check_json_datasetdict(A , A ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _SCREAMING_SNAKE_CASE (A , A , A ) -> Union[str, Any]: """simple docstring""" if split: lowercase__ = {split: jsonl_path} else: lowercase__ = '''train''' lowercase__ = {'''train''': jsonl_path, '''test''': jsonl_path} lowercase__ = tmp_path / '''cache''' lowercase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowercase__ = JsonDatasetReader(A , cache_dir=A ).read() _check_json_datasetdict(A , A , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def _SCREAMING_SNAKE_CASE (A ) -> Dict: """simple docstring""" return json.load(A ) def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" return [json.loads(A ) for line in buffer] class __lowerCAmelCase : '''simple docstring''' @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def UpperCamelCase__ (self : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase ).write() buffer.seek(0 ) lowercase__ = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def UpperCamelCase__ (self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase ).write() buffer.seek(0 ) lowercase__ = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) lowercase__ = load_json_function(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) assert isinstance(exported_content[0] , UpperCamelCase ) assert len(UpperCamelCase ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def UpperCamelCase__ (self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Tuple ): '''simple docstring''' with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , lines=UpperCamelCase , orient=UpperCamelCase , num_proc=2 ).write() buffer.seek(0 ) lowercase__ = load_json(UpperCamelCase ) assert isinstance(UpperCamelCase , UpperCamelCase ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(UpperCamelCase , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(UpperCamelCase ) == 10 def UpperCamelCase__ (self : Dict , UpperCamelCase : Any ): '''simple docstring''' with pytest.raises(UpperCamelCase ): with io.BytesIO() as buffer: JsonDatasetWriter(UpperCamelCase , UpperCamelCase , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def UpperCamelCase__ (self : str , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : List[Any] ): '''simple docstring''' lowercase__ = tmp_path_factory.mktemp('''data''' ) / f"test.json.{extension}" lowercase__ = str(shared_datadir / f"test_file.json.{extension}" ) JsonDatasetWriter(UpperCamelCase , UpperCamelCase , compression=UpperCamelCase ).write() with fsspec.open(UpperCamelCase , '''rb''' , compression='''infer''' ) as f: lowercase__ = f.read() with fsspec.open(UpperCamelCase , '''rb''' , compression='''infer''' ) as f: lowercase__ = f.read() assert exported_content == original_content
2
'''simple docstring''' import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCamelCase : Any = None lowerCamelCase : Dict = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : List[str] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCamelCase : Any = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES lowerCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : int = ["""input_ids""", """attention_mask"""] lowerCAmelCase__ : Optional[int] = TaTokenizer lowerCAmelCase__ : List[int] = [] def __init__(self : Dict , UpperCamelCase : str=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any="</s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : List[str]="<pad>" , UpperCamelCase : List[str]=100 , UpperCamelCase : Tuple=None , **UpperCamelCase : List[str] , ): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: lowercase__ = [f"<extra_id_{i}>" for i in range(UpperCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowercase__ = len(set(filter(lambda UpperCamelCase : bool('''extra_id_''' in str(UpperCamelCase ) ) , UpperCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" ''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids''' ''' tokens''' ) super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , extra_ids=UpperCamelCase , additional_special_tokens=UpperCamelCase , **UpperCamelCase , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True lowercase__ = extra_ids @staticmethod def UpperCamelCase__ (UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowercase__ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( '''This tokenizer was incorrectly instantiated with a model max length of''' f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this" ''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with''' ''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on''' f" {pretrained_model_name_or_path} automatically truncating your input to" f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences" f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with" ''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please''' ''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , UpperCamelCase , ) return max_model_length def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return lowercase__ = os.path.join( UpperCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) logger.info(f"Copy vocab file to {out_vocab_file}" ) return (out_vocab_file,) def UpperCamelCase__ (self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowercase__ = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' lowercase__ = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return list( set(filter(lambda UpperCamelCase : bool(re.search(R'''<extra_id_\d+>''' , UpperCamelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return [self.convert_tokens_to_ids(UpperCamelCase ) for token in self.get_sentinel_tokens()]
2
1
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _SCREAMING_SNAKE_CASE (A , A , A , A ) -> Optional[int]: """simple docstring""" for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" def _SCREAMING_SNAKE_CASE (A , A , A , A , A=True ) -> Any: """simple docstring""" model.train() lowercase__ = model(A ) lowercase__ = F.mse_loss(A , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(A ) def _SCREAMING_SNAKE_CASE (A , A=False ) -> Optional[int]: """simple docstring""" set_seed(42 ) lowercase__ = RegressionModel() lowercase__ = deepcopy(A ) lowercase__ = RegressionDataset(length=80 ) lowercase__ = DataLoader(A , batch_size=16 ) model.to(accelerator.device ) if sched: lowercase__ = AdamW(params=model.parameters() , lr=1E-3 ) lowercase__ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) lowercase__ = LambdaLR(A , lr_lambda=lambda A : epoch**0.65 ) lowercase__ = LambdaLR(A , lr_lambda=lambda A : epoch**0.65 ) # Make a copy of `model` if sched: lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = accelerator.prepare(A , A , A , A ) else: lowercase__ ,lowercase__ = accelerator.prepare(A , A ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _SCREAMING_SNAKE_CASE (A ) -> int: """simple docstring""" lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A ) # Use a single batch lowercase__ ,lowercase__ = next(iter(A ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) ) lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(A , A , A , A ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(A ): step_model(A , A , A , A ) else: # Sync grads step_model(A , A , A , A ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(A , A , A , A ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowercase__ = ddp_input[torch.randperm(len(A ) )] def _SCREAMING_SNAKE_CASE (A ) -> Optional[Any]: """simple docstring""" lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A ) # Use a single batch lowercase__ ,lowercase__ = next(iter(A ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) ) lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(A , A , A , A ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(A ): step_model(A , A , A , A ) else: # Sync grads step_model(A , A , A , A ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowercase__ = ddp_input[torch.randperm(len(A ) )] def _SCREAMING_SNAKE_CASE (A=False , A=False ) -> Tuple: """simple docstring""" lowercase__ = Accelerator( split_batches=A , dispatch_batches=A , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A ) for iteration, batch in enumerate(A ): lowercase__ ,lowercase__ = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) ) lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(A , A , A , A , A ) # Do "gradient accumulation" (noop) with accelerator.accumulate(A ): step_model(A , A , A , A ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(A ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) lowercase__ = ddp_input[torch.randperm(len(A ) )] GradientState._reset_state() def _SCREAMING_SNAKE_CASE (A=False , A=False ) -> str: """simple docstring""" lowercase__ = Accelerator( split_batches=A , dispatch_batches=A , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ = get_training_setup(A , A ) for iteration, batch in enumerate(A ): lowercase__ ,lowercase__ = batch.values() # Gather the distributed inputs and targs for the base model lowercase__ ,lowercase__ = accelerator.gather((ddp_input, ddp_target) ) lowercase__ ,lowercase__ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(A , A , A , A , A ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(A )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(A ): step_model(A , A , A , A ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n" lowercase__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(A )) if accelerator.num_processes > 1: check_model_parameters(A , A , A , A ) # Shuffle ddp_input on each iteration torch.manual_seed(1_337 + iteration ) GradientState._reset_state() def _SCREAMING_SNAKE_CASE () -> str: """simple docstring""" lowercase__ = Accelerator() lowercase__ = RegressionDataset(length=80 ) lowercase__ = DataLoader(A , batch_size=16 ) lowercase__ = RegressionDataset(length=96 ) lowercase__ = DataLoader(A , batch_size=16 ) lowercase__ ,lowercase__ = accelerator.prepare(A , A ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(A ): assert id(accelerator.gradient_state.active_dataloader ) == id(A ) if iteration < len(A ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(A ): assert id(accelerator.gradient_state.active_dataloader ) == id(A ) if batch_num < len(A ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _SCREAMING_SNAKE_CASE () -> Union[str, Any]: """simple docstring""" lowercase__ = Accelerator() lowercase__ = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(A ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(A ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation(A , A ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation_with_opt_and_scheduler(A , A ) def _SCREAMING_SNAKE_CASE (A ) -> List[str]: """simple docstring""" main() if __name__ == "__main__": main()
2
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __lowerCAmelCase (lowercase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ : Dict = ShapEImgaImgPipeline lowerCAmelCase__ : List[str] = ["""image"""] lowerCAmelCase__ : Any = ["""image"""] lowerCAmelCase__ : Any = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowerCAmelCase__ : Tuple = False @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : str ): '''simple docstring''' return 32 @property def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase__ (self : int ): '''simple docstring''' return 8 @property def UpperCamelCase__ (self : Any ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) lowercase__ = CLIPVisionModel(UpperCamelCase ) return model @property def UpperCamelCase__ (self : List[Any] ): '''simple docstring''' lowercase__ = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase , do_normalize=UpperCamelCase , do_resize=UpperCamelCase , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , ) return image_processor @property def UpperCamelCase__ (self : str ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } lowercase__ = PriorTransformer(**UpperCamelCase ) return model @property def UpperCamelCase__ (self : int ): '''simple docstring''' torch.manual_seed(0 ) lowercase__ = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**UpperCamelCase ) return model def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = self.dummy_prior lowercase__ = self.dummy_image_encoder lowercase__ = self.dummy_image_processor lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=UpperCamelCase , clip_sample=UpperCamelCase , clip_sample_range=1.0 , ) lowercase__ = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str=0 ): '''simple docstring''' lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith('''mps''' ): lowercase__ = torch.manual_seed(UpperCamelCase ) else: lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) lowercase__ = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCamelCase__ (self : Tuple ): '''simple docstring''' lowercase__ = '''cpu''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = torch_device == '''cpu''' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase , relax_max_difference=UpperCamelCase , ) def UpperCamelCase__ (self : int ): '''simple docstring''' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**UpperCamelCase ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(UpperCamelCase ) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**UpperCamelCase , num_images_per_prompt=UpperCamelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __lowerCAmelCase (unittest.TestCase ): '''simple docstring''' def UpperCamelCase__ (self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ (self : Union[str, Any] ): '''simple docstring''' lowercase__ = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) lowercase__ = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) lowercase__ = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) lowercase__ = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) lowercase__ = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) lowercase__ = pipe( UpperCamelCase , generator=UpperCamelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
2
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : Tuple = logging.get_logger(__name__) lowerCamelCase : Any = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class __lowerCAmelCase (lowercase_ ): '''simple docstring''' lowerCAmelCase__ : Optional[int] = """markuplm""" def __init__(self : Optional[Any] , UpperCamelCase : str=30522 , UpperCamelCase : Tuple=768 , UpperCamelCase : List[str]=12 , UpperCamelCase : List[Any]=12 , UpperCamelCase : List[str]=3072 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : Dict=0.1 , UpperCamelCase : str=0.1 , UpperCamelCase : Tuple=512 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : int=0.02 , UpperCamelCase : str=1E-12 , UpperCamelCase : List[Any]=0 , UpperCamelCase : Dict=0 , UpperCamelCase : Union[str, Any]=2 , UpperCamelCase : Any=256 , UpperCamelCase : Dict=1024 , UpperCamelCase : int=216 , UpperCamelCase : Optional[int]=1001 , UpperCamelCase : Dict=32 , UpperCamelCase : Optional[int]=50 , UpperCamelCase : str="absolute" , UpperCamelCase : Any=True , UpperCamelCase : Any=None , **UpperCamelCase : Tuple , ): '''simple docstring''' super().__init__( pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase , ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = intermediate_size lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = position_embedding_type lowercase__ = use_cache lowercase__ = classifier_dropout # additional properties lowercase__ = max_depth lowercase__ = max_xpath_tag_unit_embeddings lowercase__ = max_xpath_subs_unit_embeddings lowercase__ = tag_pad_id lowercase__ = subs_pad_id lowercase__ = xpath_unit_hidden_size
2
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCamelCase : str = { 'configuration_rag': ['RagConfig'], 'retrieval_rag': ['RagRetriever'], 'tokenization_rag': ['RagTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Dict = [ 'RagModel', 'RagPreTrainedModel', 'RagSequenceForGeneration', 'RagTokenForGeneration', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase : Optional[int] = [ 'TFRagModel', 'TFRagPreTrainedModel', 'TFRagSequenceForGeneration', 'TFRagTokenForGeneration', ] if TYPE_CHECKING: from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever from .tokenization_rag import RagTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rag import ( TFRagModel, TFRagPreTrainedModel, TFRagSequenceForGeneration, TFRagTokenForGeneration, ) else: import sys lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
2
1