code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' from __future__ import annotations from collections.abc import Generator import requests from bsa import BeautifulSoup _A : List[Any] ='''https://www.indeed.co.in/jobs?q=mobile+app+development&l=''' def SCREAMING_SNAKE_CASE_ (UpperCamelCase = "mumbai" ) -> Generator[tuple[str, str], None, None]: lowerCamelCase__ : Union[str, Any] = BeautifulSoup(requests.get(url + location ).content , """html.parser""" ) # This attribute finds out all the specifics listed in a job for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ): lowerCamelCase__ : Optional[int] = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip() lowerCamelCase__ : Any = job.find("""span""" , {"""class""": """company"""} ).text.strip() yield job_title, company_name if __name__ == "__main__": for i, job in enumerate(fetch_jobs('''Bangalore'''), 1): print(F'Job {i:>2} is {job[0]} at {job[1]}')
41
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def _lowercase (self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase (self : str ): UpperCAmelCase_ = 1 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def _lowercase (self : int ): torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _lowercase (self : Any ): torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _lowercase (self : Optional[Any] ): torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__a ) def _lowercase (self : Any ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] UpperCAmelCase_ = image[0, -3:, -3:, -1] UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase (self : Optional[int] ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _lowercase (self : str ): UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 UpperCAmelCase_ = unet.half() UpperCAmelCase_ = text_encoder.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : List[Any] ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def _lowercase (self : Tuple ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _lowercase (self : List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) UpperCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
1
0
'''simple docstring''' import random import unittest from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches, ) class __UpperCAmelCase ( _lowerCamelCase ): def __init__( self , lowerCAmelCase_=0.01 , lowerCAmelCase_=10_00 ): """simple docstring""" _snake_case = p_stop _snake_case = max_length def __iter__( self ): """simple docstring""" _snake_case = 0 _snake_case = False while not stop and count < self.max_length: yield count count += 1 _snake_case = random.random() < self.p_stop class __UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=True ): """simple docstring""" _snake_case = [ BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) for i in range(2 ) ] _snake_case = [list(lowerCAmelCase_ ) for batch_sampler_shard in batch_sampler_shards] if not split_batches: self.assertListEqual([len(lowerCAmelCase_ ) for shard in batch_sampler_shards] , [len(lowerCAmelCase_ ) for e in expected] ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) # Check the shards when the dataset is very small. _snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [[], []] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size. _snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) # Check the shards when the dataset is very small. _snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [[], []] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) # Check the shards when the dataset is a round multiple of batch size but not total batch size. _snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. _snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. _snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) # Check the shards when the dataset is very small. _snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [[[0, 1]], []] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ ) _snake_case = [[], []] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) # Expected shouldn't change self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size. _snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) # Check the shards when the dataset is not a round multiple of batch size or num_processes. _snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) # Check the shards when the dataset is very small. _snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [[[0, 1]], []] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) _snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = [[], []] self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] _snake_case = [BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) for i in range(2 )] self.assertEqual(len(batch_sampler_shards[0] ) , 3 ) self.assertEqual(len(batch_sampler_shards[1] ) , 2 ) self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] ) self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] ) def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=2 , lowerCAmelCase_=False ): """simple docstring""" random.seed(lowerCAmelCase_ ) _snake_case = list(lowerCAmelCase_ ) _snake_case = [ IterableDatasetShard( lowerCAmelCase_ , batch_size=lowerCAmelCase_ , drop_last=lowerCAmelCase_ , num_processes=lowerCAmelCase_ , process_index=lowerCAmelCase_ , split_batches=lowerCAmelCase_ , ) for i in range(lowerCAmelCase_ ) ] _snake_case = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(lowerCAmelCase_ ) iterable_dataset_lists.append(list(lowerCAmelCase_ ) ) _snake_case = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size _snake_case = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) ) self.assertTrue(len(lowerCAmelCase_ ) % shard_batch_size == 0 ) _snake_case = [] for idx in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ ): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(lowerCAmelCase_ ) < len(lowerCAmelCase_ ): reference += reference self.assertListEqual(lowerCAmelCase_ , reference[: len(lowerCAmelCase_ )] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = 42 _snake_case = RandomIterableDataset() self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) # Edge case with a very small dataset _snake_case = RandomIterableDataset(max_length=2 ) self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCAmelCase_ ) _snake_case = SkipBatchSampler(lowerCAmelCase_ , 2 ) self.assertListEqual(list(lowerCAmelCase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 ) self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = DataLoader(list(range(16 ) ) , batch_size=4 ) _snake_case = skip_first_batches(lowerCAmelCase_ , num_batches=2 ) self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] ) def lowerCamelCase ( self ): """simple docstring""" _snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 ) for idx, _ in enumerate(lowerCAmelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowerCAmelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) def lowerCamelCase ( self ): """simple docstring""" Accelerator() _snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 ) for idx, _ in enumerate(lowerCAmelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 ) # Test it also works on the second iteration for idx, _ in enumerate(lowerCAmelCase_ ): self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
42
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __A ( UpperCamelCase__ ): def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ): UpperCAmelCase_ = 1.0 if scale is None else scale UpperCAmelCase_ = 0.0 if loc is None else loc super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] ) @property def _lowercase (self : Union[str, Any] ): return self.base_dist.mean * self.scale + self.loc @property def _lowercase (self : List[Any] ): return self.base_dist.variance * self.scale**2 @property def _lowercase (self : List[Any] ): return self.variance.sqrt() class __A ( nn.Module ): def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ): super().__init__(**__a ) UpperCAmelCase_ = args_dim UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] ) UpperCAmelCase_ = domain_map def _lowercase (self : List[str] , __a : torch.Tensor ): UpperCAmelCase_ = [proj(__a ) for proj in self.proj] return self.domain_map(*__a ) class __A ( nn.Module ): def __init__(self : Union[str, Any] , __a : List[str] ): super().__init__() UpperCAmelCase_ = function def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ): return self.function(__a , *__a ) class __A : a__ : type a__ : int a__ : Dict[str, int] def __init__(self : List[Any] , __a : int = 1 ): UpperCAmelCase_ = dim UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim} def _lowercase (self : Any , __a : Any ): if self.dim == 1: return self.distribution_class(*__a ) else: return Independent(self.distribution_class(*__a ) , 1 ) def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ): UpperCAmelCase_ = self._base_distribution(__a ) if loc is None and scale is None: return distr else: return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim ) @property def _lowercase (self : Any ): return () if self.dim == 1 else (self.dim,) @property def _lowercase (self : Dict ): return len(self.event_shape ) @property def _lowercase (self : Tuple ): return 0.0 def _lowercase (self : List[str] , __a : int ): return ParameterProjection( in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _lowercase (self : Optional[int] , *__a : torch.Tensor ): raise NotImplementedError() @staticmethod def _lowercase (__a : torch.Tensor ): return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0 class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} a__ : type = StudentT @classmethod def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCAmelCase_ = 2.0 + cls.squareplus(__a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"loc": 1, "scale": 1} a__ : type = Normal @classmethod def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"total_count": 1, "logits": 1} a__ : type = NegativeBinomial @classmethod def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _lowercase (self : List[str] , __a : str ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if self.dim == 1: return self.distribution_class(total_count=__a , logits=__a ) else: return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 ) def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
1
0
from scipy.stats import spearmanr import datasets __lowercase = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' __lowercase = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' __lowercase = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase_ ( datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self) -> Optional[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float'''), '''references''': datasets.Value('''float'''), }) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , ) def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=False) -> List[str]: __UpperCamelCase :Optional[Any] = spearmanr(__lowercase , __lowercase) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
43
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def _lowercase (self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ): UpperCAmelCase_ = 0.0 for i, j in zip(__a , __a ): n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0 UpperCAmelCase_ = n_correct / len(__a ) return { "accuracy": accuracy, }
1
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __A : _UpperCamelCase : Any = XGLMConfig _UpperCamelCase : List[Any] = {} _UpperCamelCase : Optional[int] = "gelu" def __init__( self , a__ , a__=14 , a__=7 , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=2 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=0.0_2 , ): _lowerCAmelCase : Optional[int] = parent _lowerCAmelCase : int = batch_size _lowerCAmelCase : Optional[Any] = seq_length _lowerCAmelCase : Any = is_training _lowerCAmelCase : Optional[int] = use_input_mask _lowerCAmelCase : str = use_labels _lowerCAmelCase : Any = vocab_size _lowerCAmelCase : Optional[int] = d_model _lowerCAmelCase : int = num_hidden_layers _lowerCAmelCase : Union[str, Any] = num_attention_heads _lowerCAmelCase : Union[str, Any] = ffn_dim _lowerCAmelCase : Any = activation_function _lowerCAmelCase : Tuple = activation_dropout _lowerCAmelCase : int = attention_dropout _lowerCAmelCase : Optional[int] = max_position_embeddings _lowerCAmelCase : str = initializer_range _lowerCAmelCase : List[Any] = None _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : str = 2 _lowerCAmelCase : Optional[int] = 1 def __A ( self ): return XGLMConfig.from_pretrained("""facebook/xglm-564M""" ) def __A ( self ): _lowerCAmelCase : Dict = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) _lowerCAmelCase : Tuple = None if self.use_input_mask: _lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase : int = self.get_config() _lowerCAmelCase : int = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def __A ( self ): return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=a__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=a__ , ) def __A ( self ): _lowerCAmelCase : Tuple = self.prepare_config_and_inputs() ( ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ( _lowerCAmelCase ) , ) : List[str] = config_and_inputs _lowerCAmelCase : Tuple = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): _UpperCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () _UpperCamelCase : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else () _UpperCamelCase : Optional[Any] = ( {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {} ) _UpperCamelCase : Any = False _UpperCamelCase : int = False _UpperCamelCase : Union[str, Any] = False def __A ( self ): _lowerCAmelCase : List[str] = TFXGLMModelTester(self ) _lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=a__ , n_embd=37 ) def __A ( self ): self.config_tester.run_common_tests() @slow def __A ( self ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase : Any = TFXGLMModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" ) def __A ( self ): super().test_resize_token_embeddings() @require_tf class __A ( unittest.TestCase ): @slow def __A ( self , a__=True ): _lowerCAmelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off _lowerCAmelCase : Any = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: on _lowerCAmelCase : str = model.generate(a__ , do_sample=a__ , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , a__ ) @slow def __A ( self ): _lowerCAmelCase : str = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) tf.random.set_seed(0 ) _lowerCAmelCase : List[Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" ) _lowerCAmelCase : Optional[Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0""" ): _lowerCAmelCase : List[str] = model.generate(a__ , do_sample=a__ , seed=[7, 0] ) _lowerCAmelCase : int = tokenizer.decode(output_ids[0] , skip_special_tokens=a__ ) _lowerCAmelCase : Union[str, Any] = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(a__ , a__ ) @slow def __A ( self ): _lowerCAmelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase : List[Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) _lowerCAmelCase : Optional[Any] = """left""" # use different length sentences to test batching _lowerCAmelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When""", """Hello, my dog is a little""", ] _lowerCAmelCase : int = tokenizer(a__ , return_tensors="""tf""" , padding=a__ ) _lowerCAmelCase : Optional[int] = inputs["""input_ids"""] _lowerCAmelCase : List[Any] = model.generate(input_ids=a__ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 ) _lowerCAmelCase : Any = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids _lowerCAmelCase : List[Any] = model.generate(input_ids=a__ , max_new_tokens=12 ) _lowerCAmelCase : str = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids _lowerCAmelCase : Optional[Any] = model.generate(input_ids=a__ , max_new_tokens=12 ) _lowerCAmelCase : Any = tokenizer.batch_decode(a__ , skip_special_tokens=a__ ) _lowerCAmelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a__ ) _lowerCAmelCase : Optional[int] = tokenizer.decode(output_padded[0] , skip_special_tokens=a__ ) _lowerCAmelCase : Dict = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(a__ , a__ ) self.assertListEqual(a__ , [non_padded_sentence, padded_sentence] )
44
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]: '''simple docstring''' model.train() UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict: '''simple docstring''' set_seed(42 ) UpperCAmelCase_ = RegressionModel() UpperCAmelCase_ = deepcopy(snake_case_ ) UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( snake_case_ : Any ) -> int: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] GradientState._reset_state() def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ )) if accelerator.num_processes > 1: check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ = RegressionDataset(length=96 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if iteration < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if batch_num < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(snake_case_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(snake_case_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(snake_case_ , snake_case_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Dict ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
1
0
"""simple docstring""" from importlib import import_module from .logging import get_logger lowercase_ = get_logger(__name__) class __lowerCAmelCase : '''simple docstring''' def __init__( self , _a , _a=None ): __a = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self , _a , getattr(_a , _a ) ) __a = module._original_module if isinstance(_a , _PatchedModuleObj ) else module class __lowerCAmelCase : '''simple docstring''' __UpperCAmelCase : int = [] def __init__( self , _a , _a , _a , _a=None ): __a = obj __a = target __a = new __a = target.split('''.''' )[0] __a = {} __a = attrs or [] def __enter__( self ): *__a , __a = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(_a ) ): try: __a = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): __a = getattr(self.obj , _a ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(_a , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): __a = obj_attr # patch at top level setattr(self.obj , _a , _PatchedModuleObj(_a , attrs=self.attrs ) ) __a = getattr(self.obj , _a ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(_a , _a , _PatchedModuleObj(getattr(_a , _a , _a ) , attrs=self.attrs ) ) __a = getattr(_a , _a ) # finally set the target attribute setattr(_a , _a , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: __a = getattr(import_module('''.'''.join(_a ) ) , _a ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , _a ) is attr_value: __a = getattr(self.obj , _a ) setattr(self.obj , _a , self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" __a = globals()['''__builtins__'''][target_attr] setattr(self.obj , _a , self.new ) else: raise RuntimeError(f'''Tried to patch attribute {target_attr} instead of a submodule.''' ) def __exit__( self , *_a ): for attr in list(self.original ): setattr(self.obj , _a , self.original.pop(_a ) ) def __UpperCAmelCase ( self ): self.__enter__() self._active_patches.append(self ) def __UpperCAmelCase ( self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
45
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(snake_case_ , x % y ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int: '''simple docstring''' UpperCAmelCase_ = 1 for i in range(1 , n + 1 ): UpperCAmelCase_ = lcm(snake_case_ , snake_case_ ) return g if __name__ == "__main__": print(f"{solution() = }")
1
0
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = "cpu" , SCREAMING_SNAKE_CASE : Union[str, None] = None ): '''simple docstring''' lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE ) for k, v in tqdm(state_dict.items() ): if not isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ): raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" ) lowerCAmelCase = v.half() if save_path is None: # overwrite src_path lowerCAmelCase = src_path torch.save(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": fire.Fire(convert)
46
'''simple docstring''' import os from math import logaa def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int: '''simple docstring''' UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ): UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) ) if x * logaa(snake_case_ ) > largest: UpperCAmelCase_ = x * logaa(snake_case_ ) UpperCAmelCase_ = i + 1 return result if __name__ == "__main__": print(solution())
1
0
'''simple docstring''' def _lowerCAmelCase ( _UpperCamelCase : list , _UpperCamelCase : int = 0 ) -> list: """simple docstring""" _SCREAMING_SNAKE_CASE =length or len(_UpperCamelCase ) _SCREAMING_SNAKE_CASE =False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =list_data[i + 1], list_data[i] _SCREAMING_SNAKE_CASE =True return list_data if not swapped else bubble_sort(_UpperCamelCase , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
47
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = checkpoint UpperCAmelCase_ = {} UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ ) } for i in range(snake_case_ ): UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""" ) UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""" ) UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) for i in range(snake_case_ ): UpperCAmelCase_ = num_up_blocks - 1 - i UpperCAmelCase_ = [ key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) return new_checkpoint def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict: '''simple docstring''' UpperCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ = io.BytesIO(r.content ) UpperCAmelCase_ = OmegaConf.load(snake_case_ ) UpperCAmelCase_ = 5_12 UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ = {} with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ = f.get_tensor(snake_case_ ) else: UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"] # Convert the VAE model. UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ ) UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ ) UpperCAmelCase_ = AutoencoderKL(**snake_case_ ) vae.load_state_dict(snake_case_ ) vae.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') SCREAMING_SNAKE_CASE_: str =parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
1
0
def A ( _SCREAMING_SNAKE_CASE ) -> int: if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ): raise TypeError("Input value must be a 'int' type" ) return bin(_SCREAMING_SNAKE_CASE ).count("1" ) if __name__ == "__main__": import doctest doctest.testmod()
48
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __A ( unittest.TestCase ): def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 1 def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase (self : Dict , __a : Any , __a : List[Any] ): UpperCAmelCase_ = FlaxViTModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (self.image_size, self.image_size) UpperCAmelCase_ = (self.patch_size, self.patch_size) UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase (self : Tuple , __a : str , __a : Any ): UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = FlaxViTForImageClassification(config=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = FlaxViTForImageClassification(__a ) UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(__a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase (self : Any ): UpperCAmelCase_ = FlaxViTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(__a , __a ) UpperCAmelCase_ = model_class(__a ) @jax.jit def model_jitted(__a : Tuple , **__a : List[Any] ): return model(pixel_values=__a , **__a ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase (self : Tuple ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__a )
1
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class _A ( unittest.TestCase ): UpperCamelCase__ : str = ViTImageProcessor if is_vision_available() else None @property def _lowerCamelCase ( self : List[Any]): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = (3, 32, 128) __a = tempfile.mkdtemp() # fmt: off __a = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __a = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE)))) __a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp: fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + '''\n''') __a = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 128}, } __a = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE) with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp: json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : str): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : Any): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : str): '''simple docstring''' shutil.rmtree(self.tmpdirname) def _lowerCamelCase ( self : List[Any]): '''simple docstring''' __a = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta) __a = Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1)) return image_input def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.get_tokenizer() __a = self.get_image_processor() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) processor.save_pretrained(self.tmpdirname) __a = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.get_tokenizer() __a = self.get_image_processor() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) processor.save_pretrained(self.tmpdirname) __a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''') __a = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0) __a = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.char_tokenizer , __SCREAMING_SNAKE_CASE) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : int): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = self.prepare_image_inputs() __a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''') __a = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''') for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _lowerCamelCase ( self : Dict): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = '''test''' __a = processor(text=__SCREAMING_SNAKE_CASE) __a = tokenizer(__SCREAMING_SNAKE_CASE) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = '''test''' __a = self.prepare_image_inputs() __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE) self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''labels''']) # test if it raises when no input is passed with pytest.raises(__SCREAMING_SNAKE_CASE): processor() def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __a = processor.char_decode(__SCREAMING_SNAKE_CASE) __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE) __a = [seq.replace(''' ''' , '''''') for seq in decoded_tok] self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) def _lowerCamelCase ( self : Optional[Any]): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = None __a = self.prepare_image_inputs() __a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE) self.assertListEqual(list(inputs.keys()) , processor.model_input_names) def _lowerCamelCase ( self : Union[str, Any]): '''simple docstring''' __a = self.get_image_processor() __a = self.get_tokenizer() __a = MgpstrProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE) __a = torch.randn(1 , 27 , 38) __a = torch.randn(1 , 27 , 50_257) __a = torch.randn(1 , 27 , 30_522) __a = processor.batch_decode([char_input, bpe_input, wp_input]) self.assertListEqual(list(results.keys()) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''])
49
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 5 # Realm tok UpperCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(__a , exist_ok=__a ) def _lowercase (self : Optional[Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def _lowercase (self : Any ): shutil.rmtree(self.tmpdirname ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase (self : List[str] ): UpperCAmelCase_ = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase (self : Any ): UpperCAmelCase_ = np.array( [ B"This is the first record", B"This is the second record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=__a , ) return block_records def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _lowercase (self : int ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual([False, True, True] , __a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: UpperCAmelCase_ = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
1
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase _UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) _UpperCAmelCase : Any = { """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""", """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""", """allenai/longformer-large-4096-finetuned-triviaqa""": ( """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json""" ), """allenai/longformer-base-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json""" ), """allenai/longformer-large-4096-extra.pos.embd.only""": ( """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json""" ), } class lowerCAmelCase ( __UpperCamelCase ): UpperCAmelCase__ = """longformer""" def __init__( self : Any , UpperCAmelCase : Union[List[int], int] = 512 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 0 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 30522 , UpperCAmelCase : int = 768 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 12 , UpperCAmelCase : int = 3072 , UpperCAmelCase : str = "gelu" , UpperCAmelCase : float = 0.1 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : int = 512 , UpperCAmelCase : int = 2 , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 1e-12 , UpperCAmelCase : bool = False , **UpperCAmelCase : int , ) -> Union[str, Any]: super().__init__(pad_token_id=UpperCAmelCase , **UpperCAmelCase ) lowerCamelCase__ : str = attention_window lowerCamelCase__ : Optional[int] = sep_token_id lowerCamelCase__ : Optional[Any] = bos_token_id lowerCamelCase__ : int = eos_token_id lowerCamelCase__ : Any = vocab_size lowerCamelCase__ : Union[str, Any] = hidden_size lowerCamelCase__ : str = num_hidden_layers lowerCamelCase__ : int = num_attention_heads lowerCamelCase__ : List[str] = hidden_act lowerCamelCase__ : Any = intermediate_size lowerCamelCase__ : Optional[int] = hidden_dropout_prob lowerCamelCase__ : List[str] = attention_probs_dropout_prob lowerCamelCase__ : Tuple = max_position_embeddings lowerCamelCase__ : str = type_vocab_size lowerCamelCase__ : List[Any] = initializer_range lowerCamelCase__ : Union[str, Any] = layer_norm_eps lowerCamelCase__ : Optional[Any] = onnx_export class lowerCAmelCase ( __UpperCamelCase ): def __init__( self : Optional[Any] , UpperCAmelCase : "PretrainedConfig" , UpperCAmelCase : str = "default" , UpperCAmelCase : "List[PatchingSpec]" = None ) -> Any: super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : Any = True @property def A_ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowerCamelCase__ : int = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowerCamelCase__ : Optional[int] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def A_ ( self : Dict ) -> Mapping[str, Mapping[int, str]]: lowerCamelCase__ : Any = super().outputs if self.task == "default": lowerCamelCase__ : List[Any] = {0: 'batch'} return outputs @property def A_ ( self : Optional[int] ) -> float: return 1e-4 @property def A_ ( self : str ) -> int: # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def A_ ( self : List[str] , UpperCAmelCase : "PreTrainedTokenizerBase" , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ) -> Mapping[str, Any]: lowerCamelCase__ : List[str] = super().generate_dummy_inputs( preprocessor=UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly lowerCamelCase__ : Dict = torch.zeros_like(inputs['input_ids'] ) # make every second token global lowerCamelCase__ : Dict = 1 return inputs
50
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K) def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float: '''simple docstring''' if donor_conc <= 0: raise ValueError("Donor concentration should be positive" ) elif acceptor_conc <= 0: raise ValueError("Acceptor concentration should be positive" ) elif intrinsic_conc <= 0: raise ValueError("Intrinsic concentration should be positive" ) elif donor_conc <= intrinsic_conc: raise ValueError( "Donor concentration should be greater than intrinsic concentration" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( "Acceptor concentration should be greater than intrinsic concentration" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : int = GPTSanJapaneseTokenizer UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[Any] = {'''do_clean_text''': False, '''add_prefix_space''': False} def lowerCamelCase ( self : Any): """simple docstring""" super().setUp() # fmt: off UpperCAmelCase_ = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>'''] # fmt: on UpperCAmelCase_ = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀 UpperCAmelCase_ = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file''']) UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file''']) with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens])) with open(self.emoji_file , '''w''') as emoji_writer: emoji_writer.write(json.dumps(_snake_case)) def lowerCamelCase ( self : int , **_snake_case : Any): """simple docstring""" kwargs.update(self.special_tokens_map) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_snake_case) def lowerCamelCase ( self : Tuple , _snake_case : Tuple): """simple docstring""" UpperCAmelCase_ = '''こんにちは、世界。 \nこんばんは、㔺界。😀''' UpperCAmelCase_ = '''こんにちは、世界。 \nこんばんは、世界。😀''' return input_text, output_text def lowerCamelCase ( self : Union[str, Any] , _snake_case : int): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ = self.get_input_output_texts(_snake_case) UpperCAmelCase_ = tokenizer.encode(_snake_case , add_special_tokens=_snake_case) UpperCAmelCase_ = tokenizer.decode(_snake_case , clean_up_tokenization_spaces=_snake_case) return text, ids def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass # TODO add if relevant def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" pass # TODO add if relevant def lowerCamelCase ( self : Dict): """simple docstring""" pass # TODO add if relevant def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() # Testing tokenization UpperCAmelCase_ = '''こんにちは、世界。 こんばんは、㔺界。''' UpperCAmelCase_ = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。'''] UpperCAmelCase_ = tokenizer.tokenize(_snake_case) self.assertListEqual(_snake_case , _snake_case) # Testing conversion to ids without special tokens UpperCAmelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_snake_case) self.assertListEqual(_snake_case , _snake_case) # Testing conversion to ids with special tokens UpperCAmelCase_ = tokens + [tokenizer.unk_token] UpperCAmelCase_ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(_snake_case) self.assertListEqual(_snake_case , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() # Testing tokenization UpperCAmelCase_ = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。''' UpperCAmelCase_ = '''こんにちは、、、、世界。こんばんは、、、、世界。''' UpperCAmelCase_ = tokenizer.encode(_snake_case) UpperCAmelCase_ = tokenizer.decode(_snake_case) self.assertEqual(_snake_case , _snake_case) @slow def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''') # Testing tokenization UpperCAmelCase_ = '''こんにちは、世界。''' UpperCAmelCase_ = '''こんばんは、㔺界。😀''' UpperCAmelCase_ = '''こんにちは、世界。こんばんは、世界。😀''' UpperCAmelCase_ = tokenizer.encode(prefix_text + input_text) UpperCAmelCase_ = tokenizer.encode('''''' , prefix_text=prefix_text + input_text) UpperCAmelCase_ = tokenizer.encode(_snake_case , prefix_text=_snake_case) UpperCAmelCase_ = tokenizer.decode(_snake_case) UpperCAmelCase_ = tokenizer.decode(_snake_case) UpperCAmelCase_ = tokenizer.decode(_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertEqual(_snake_case , _snake_case) self.assertEqual(_snake_case , _snake_case) @slow def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''') # Testing tokenization UpperCAmelCase_ = '''こんにちは、世界。''' UpperCAmelCase_ = '''こんばんは、㔺界。😀''' UpperCAmelCase_ = len(tokenizer.encode(_snake_case)) - 2 UpperCAmelCase_ = len(tokenizer.encode(_snake_case)) - 2 UpperCAmelCase_ = [1] + [0] * (len_prefix + len_text + 1) UpperCAmelCase_ = [1] * (len_prefix + len_text + 1) + [0] UpperCAmelCase_ = [1] + [1] * (len_prefix) + [0] * (len_text + 1) UpperCAmelCase_ = tokenizer(prefix_text + input_text).token_type_ids UpperCAmelCase_ = tokenizer('''''' , prefix_text=prefix_text + input_text).token_type_ids UpperCAmelCase_ = tokenizer(_snake_case , prefix_text=_snake_case).token_type_ids self.assertListEqual(_snake_case , _snake_case) self.assertListEqual(_snake_case , _snake_case) self.assertListEqual(_snake_case , _snake_case) @slow def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''') UpperCAmelCase_ = tokenizer.encode('''あンいワ''') UpperCAmelCase_ = tokenizer.encode('''''' , prefix_text='''あンいワ''') UpperCAmelCase_ = tokenizer.encode('''いワ''' , prefix_text='''あン''') self.assertEqual(tokenizer.decode(_snake_case) , tokenizer.decode(_snake_case)) self.assertEqual(tokenizer.decode(_snake_case) , tokenizer.decode(_snake_case)) self.assertNotEqual(_snake_case , _snake_case) self.assertNotEqual(_snake_case , _snake_case) self.assertEqual(x_token_a[1] , x_token_a[-1]) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3]) # SEG token @slow def lowerCamelCase ( self : Any): """simple docstring""" UpperCAmelCase_ = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''') UpperCAmelCase_ = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']] UpperCAmelCase_ = tokenizer(_snake_case , padding=_snake_case) UpperCAmelCase_ = tokenizer.batch_encode_plus(_snake_case , padding=_snake_case) # fmt: off UpperCAmelCase_ = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]] UpperCAmelCase_ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] UpperCAmelCase_ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , _snake_case) self.assertListEqual(x_token.token_type_ids , _snake_case) self.assertListEqual(x_token.attention_mask , _snake_case) self.assertListEqual(x_token_a.input_ids , _snake_case) self.assertListEqual(x_token_a.token_type_ids , _snake_case) self.assertListEqual(x_token_a.attention_mask , _snake_case) def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass def lowerCamelCase ( self : str): """simple docstring""" pass
51
'''simple docstring''' import math def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase_ = input("Enter message: " ) UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) ) UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " ) if mode.lower().startswith("e" ): UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ ) elif mode.lower().startswith("d" ): UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"""Output:\n{text + "|"}""" ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = [""] * key for col in range(snake_case_ ): UpperCAmelCase_ = col while pointer < len(snake_case_ ): cipher_text[col] += message[pointer] pointer += key return "".join(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key ) UpperCAmelCase_ = key UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ ) UpperCAmelCase_ = [""] * num_cols UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase_ = 0 row += 1 return "".join(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
1
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
'''simple docstring''' import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger() SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] , __a : str ): os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = {"source": "What is love ?", "target": "life"} UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f: f.write(__a ) def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = os.path.join(__a , "output" ) UpperCAmelCase_ = os.path.join(__a , "data" ) self._create_dummy_data(data_dir=__a ) UpperCAmelCase_ = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__a , env=self.get_env() ) UpperCAmelCase_ = os.path.join(__a , "metrics.json" ) with open(__a ) as f: UpperCAmelCase_ = json.load(__a ) return result @require_torch_gpu def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def _lowercase (self : Dict ): UpperCAmelCase_ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def _lowercase (self : Any ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
1
0
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase__ ( __lowercase : Features ) -> Optional[int]: """simple docstring""" __UpperCamelCase = np.inf def set_batch_size(__lowercase : FeatureType ) -> None: nonlocal batch_size if isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__lowercase , __lowercase ): __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__lowercase , __lowercase ) and feature.dtype == "binary": __UpperCamelCase = min(__lowercase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__lowercase , __lowercase ) return None if batch_size is np.inf else batch_size class snake_case ( __lowerCamelCase ): """simple docstring""" def __init__( self : List[str] , __A : NestedDataStructureLike[PathLike] , __A : Optional[NamedSplit] = None , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[int] = None , **__A : Dict , ): super().__init__( __A , split=__A , features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , ) __UpperCamelCase = path_or_paths if isinstance(__A , __A ) else {self.split: path_or_paths} __UpperCamelCase = _PACKAGED_DATASETS_MODULES['parquet'][1] __UpperCamelCase = Parquet( cache_dir=__A , data_files=__A , features=__A , hash=__A , **__A , ) def _lowerCamelCase ( self : Optional[int] ): # Build iterable dataset if self.streaming: __UpperCamelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , ) __UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__A , in_memory=self.keep_in_memory ) return dataset class snake_case : """simple docstring""" def __init__( self : List[str] , __A : Dataset , __A : Union[PathLike, BinaryIO] , __A : Optional[int] = None , **__A : Dict , ): __UpperCamelCase = dataset __UpperCamelCase = path_or_buf __UpperCamelCase = batch_size or get_writer_batch_size(dataset.features ) __UpperCamelCase = parquet_writer_kwargs def _lowerCamelCase ( self : Optional[int] ): __UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: __UpperCamelCase = self._write(file_obj=__A , batch_size=__A , **self.parquet_writer_kwargs ) else: __UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__A , **self.parquet_writer_kwargs ) return written def _lowerCamelCase ( self : List[str] , __A : BinaryIO , __A : int , **__A : List[str] ): __UpperCamelCase = 0 __UpperCamelCase = parquet_writer_kwargs.pop('path_or_buf' , __A ) __UpperCamelCase = self.dataset.features.arrow_schema __UpperCamelCase = pq.ParquetWriter(__A , schema=__A , **__A ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __A ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): __UpperCamelCase = query_table( table=self.dataset._data , key=slice(__A , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__A ) written += batch.nbytes writer.close() return written
53
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time SCREAMING_SNAKE_CASE_: Optional[int] =Lock() def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(snake_case_ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() UpperCAmelCase_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left UpperCAmelCase_ = min(snake_case_ , snake_case_ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(snake_case_ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() UpperCAmelCase_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right UpperCAmelCase_ = max(snake_case_ , snake_case_ ) # after all swaps are performed, send the values back to main result_pipe[1].send(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr for i in range(1 , len(snake_case_ ) - 1 ): UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr process_array_.append( Process( target=snake_case_ , args=( len(snake_case_ ) - 1, arr[len(snake_case_ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(snake_case_ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(snake_case_ ) ): UpperCAmelCase_ = result_pipe[p][0].recv() process_array_[p].join() return arr def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*snake_case_ ) UpperCAmelCase_ = odd_even_transposition(snake_case_ ) print("Sorted List\n" ) print(*snake_case_ ) if __name__ == "__main__": main()
1
0
"""simple docstring""" import warnings warnings.warn( '''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: ''' '''`from accelerate import find_executable_batch_size` to avoid this warning.''', FutureWarning, )
54
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b" UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) ) return "0b" + "".join( str(int("1" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class snake_case : """simple docstring""" @staticmethod def snake_case ( *UpperCamelCase , **UpperCamelCase ): """simple docstring""" pass def __snake_case ( UpperCAmelCase_ : Image ): lowerCamelCase_ = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class snake_case ( unittest.TestCase ): """simple docstring""" _lowerCamelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = DepthEstimationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" ) self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , UpperCamelCase ) import datasets lowerCamelCase_ = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) lowerCamelCase_ = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, {"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )}, ] , UpperCamelCase , ) @require_tf @unittest.skip("Depth estimation is not implemented in TF" ) def snake_case ( self ): """simple docstring""" pass @slow @require_torch def snake_case ( self ): """simple docstring""" lowerCamelCase_ = "Intel/dpt-large" lowerCamelCase_ = pipeline("depth-estimation" , model=UpperCamelCase ) lowerCamelCase_ = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" ) lowerCamelCase_ = hashimage(outputs["depth"] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 ) @require_torch def snake_case ( self ): """simple docstring""" # This is highly irregular to have no small tests. self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
55
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ) -> None: '''simple docstring''' if start is None: UpperCAmelCase_ = 0 if end is None: UpperCAmelCase_ = len(snake_case_ ) - 1 if start >= end: return UpperCAmelCase_ = (start + end) // 2 slowsort(snake_case_ , snake_case_ , snake_case_ ) slowsort(snake_case_ , mid + 1 , snake_case_ ) if sequence[end] < sequence[mid]: UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[end] slowsort(snake_case_ , snake_case_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
1
0
'''simple docstring''' a : Optional[Any] = { 'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...', '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.', ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.', '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-', '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/' } # Exclamation mark is not in ITU-R recommendation # fmt: on a : Optional[Any] = {value: key for key, value in MORSE_CODE_DICT.items()} def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def __magic_name__ ( __UpperCAmelCase ) -> str: '''simple docstring''' return "".join(REVERSE_DICT[char] for char in message.split() ) def __magic_name__ ( ) -> None: '''simple docstring''' snake_case_ = '''Morse code here!''' print(__UpperCAmelCase ) snake_case_ = encrypt(__UpperCAmelCase ) print(__UpperCAmelCase ) snake_case_ = decrypt(__UpperCAmelCase ) print(__UpperCAmelCase ) if __name__ == "__main__": main()
56
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __A ( UpperCamelCase__ ): a__ : Optional[Any] = DistilBertTokenizer a__ : Any = DistilBertTokenizerFast a__ : str = True @slow def _lowercase (self : int ): UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
1
0
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' def __init__( self , __a , __a = None , __a = None , __a = False , __a = False , __a = None , __a = None , **__a , ): super().__init__( features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , ) __lowerCAmelCase = Generator( cache_dir=__a , features=__a , generator=__a , gen_kwargs=__a , **__a , ) def snake_case ( self ): # Build iterable dataset if self.streaming: __lowerCAmelCase = self.builder.as_streaming_dataset(split="train" ) # Build regular (map-style) dataset else: __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = None self.builder.download_and_prepare( download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , ) __lowerCAmelCase = self.builder.as_dataset( split="train" , verification_mode=__a , in_memory=self.keep_in_memory ) return dataset
57
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) SCREAMING_SNAKE_CASE_: Tuple =[] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val def lowerCAmelCase_ ( snake_case_ : int ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCAmelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) UpperCAmelCase_ = value else: UpperCAmelCase_ = value return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "" if is_panoptic: UpperCAmelCase_ = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:2_56, :] UpperCAmelCase_ = in_proj_bias[:2_56] UpperCAmelCase_ = in_proj_weight[2_56:5_12, :] UpperCAmelCase_ = in_proj_bias[2_56:5_12] UpperCAmelCase_ = in_proj_weight[-2_56:, :] UpperCAmelCase_ = in_proj_bias[-2_56:] def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCAmelCase_ = "resnet101" if "dc5" in model_name: UpperCAmelCase_ = True UpperCAmelCase_ = "panoptic" in model_name if is_panoptic: UpperCAmelCase_ = 2_50 else: UpperCAmelCase_ = 91 UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "coco-detection-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} # load image processor UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection" UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ ) # prepare image UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" ) UpperCAmelCase_ = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval() UpperCAmelCase_ = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCAmelCase_ = "conditional_detr." + src rename_key(snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ = rename_backbone_keys(snake_case_ ) # query, key and value matrices need special treatment read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val # finally, create HuggingFace model and load state dict UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ ) model.load_state_dict(snake_case_ ) model.eval() model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion UpperCAmelCase_ = conditional_detr(snake_case_ ) UpperCAmelCase_ = model(snake_case_ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
1
0
'''simple docstring''' import math def lowerCamelCase ( __lowerCamelCase : int ) ->bool: _SCREAMING_SNAKE_CASE = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(__lowerCamelCase ) def lowerCamelCase ( __lowerCamelCase : float = 1 / 1_2345 ) ->int: _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = 3 while True: _SCREAMING_SNAKE_CASE = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(__lowerCamelCase ): _SCREAMING_SNAKE_CASE = int(__lowerCamelCase ) total_partitions += 1 if check_partition_perfect(__lowerCamelCase ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(__lowerCamelCase ) integer += 1 if __name__ == "__main__": print(f"""{solution() = }""")
58
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): def __init__(self : int , *__a : Dict , **__a : str ): warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
1
0
import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def UpperCamelCase ( __lowerCamelCase : List[str] ): snake_case : List[str] = [] for line in lines: snake_case : List[Any] = re.sub(r"#.*" , "" , __lowerCamelCase ) # remove comments if line: filtered_lines.append(__lowerCamelCase ) snake_case : Optional[Any] = "\n".join(__lowerCamelCase ) # Make a hash from all this code snake_case : Tuple = full_str.encode("utf-8" ) return shaaaa(__lowerCamelCase ).hexdigest() # get importable module names and hash for caching __lowerCamelCase = { """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions __lowerCamelCase = { """.csv""": ("""csv""", {}), """.tsv""": ("""csv""", {"""sep""": """\t"""}), """.json""": ("""json""", {}), """.jsonl""": ("""json""", {}), """.parquet""": ("""parquet""", {}), """.arrow""": ("""arrow""", {}), """.txt""": ("""text""", {}), } _EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) __lowerCamelCase = {"""imagefolder""", """audiofolder"""} # Used to filter data files based on extensions given a module name __lowerCamelCase = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""") _MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
59
'''simple docstring''' from __future__ import annotations import queue class __A : def __init__(self : Optional[Any] , __a : str ): UpperCAmelCase_ = data UpperCAmelCase_ = None UpperCAmelCase_ = None def lowerCAmelCase_ ( ) -> TreeNode: '''simple docstring''' print("\n********Press N to stop entering at any point of time********\n" ) UpperCAmelCase_ = input("Enter the value of the root node: " ).strip().lower() UpperCAmelCase_ = queue.Queue() UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() UpperCAmelCase_ = f"""Enter the left node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = left_node q.put(snake_case_ ) UpperCAmelCase_ = f"""Enter the right node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = right_node q.put(snake_case_ ) raise def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = [] while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(snake_case_ ) UpperCAmelCase_ = n.left # end of while means current node doesn't have left child UpperCAmelCase_ = stack.pop() # start to traverse its right child UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: stack.append(snake_case_ ) UpperCAmelCase_ = n.left UpperCAmelCase_ = stack.pop() print(n.data , end="," ) UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ , UpperCAmelCase_ = [], [] UpperCAmelCase_ = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 UpperCAmelCase_ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def lowerCAmelCase_ ( snake_case_ : str = "" , snake_case_ : Any=50 , snake_case_ : Union[str, Any]="*" ) -> str: '''simple docstring''' if not s: return "\n" + width * char UpperCAmelCase_ , UpperCAmelCase_ = divmod(width - len(snake_case_ ) - 2 , 2 ) return f"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) SCREAMING_SNAKE_CASE_: TreeNode =build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
1
0
"""simple docstring""" import os import string import sys snake_case__ : Optional[int] = 1 << 8 snake_case__ : Union[str, Any] = { '''tab''': ord('''\t'''), '''newline''': ord('''\r'''), '''esc''': 27, '''up''': 65 + ARROW_KEY_FLAG, '''down''': 66 + ARROW_KEY_FLAG, '''right''': 67 + ARROW_KEY_FLAG, '''left''': 68 + ARROW_KEY_FLAG, '''mod_int''': 91, '''undefined''': sys.maxsize, '''interrupt''': 3, '''insert''': 50, '''delete''': 51, '''pg_up''': 53, '''pg_down''': 54, } snake_case__ : Optional[int] = KEYMAP['''up'''] snake_case__ : Tuple = KEYMAP['''left'''] if sys.platform == "win32": snake_case__ : Dict = [] snake_case__ : List[Any] = { B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG, B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG, B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG, B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG, } for i in range(10): snake_case__ : Dict = ord(str(i)) def _snake_case ( ): if os.name == "nt": import msvcrt lowerCAmelCase : Optional[int] = '''mbcs''' # Flush the keyboard buffer while msvcrt.kbhit(): msvcrt.getch() if len(_snake_case ) == 0: # Read the keystroke lowerCAmelCase : List[str] = msvcrt.getch() # If it is a prefix char, get second part if ch in (b"\x00", b"\xe0"): lowerCAmelCase : Optional[Any] = ch + msvcrt.getch() # Translate actual Win chars to bullet char types try: lowerCAmelCase : List[str] = chr(WIN_KEYMAP[cha] ) WIN_CH_BUFFER.append(chr(KEYMAP['''mod_int'''] ) ) WIN_CH_BUFFER.append(_snake_case ) if ord(_snake_case ) in ( KEYMAP["insert"] - 1 << 9, KEYMAP["delete"] - 1 << 9, KEYMAP["pg_up"] - 1 << 9, KEYMAP["pg_down"] - 1 << 9, ): WIN_CH_BUFFER.append(chr(126 ) ) lowerCAmelCase : str = chr(KEYMAP['''esc'''] ) except KeyError: lowerCAmelCase : List[str] = cha[1] else: lowerCAmelCase : Optional[Any] = ch.decode(_snake_case ) else: lowerCAmelCase : str = WIN_CH_BUFFER.pop(0 ) elif os.name == "posix": import termios import tty lowerCAmelCase : Any = sys.stdin.fileno() lowerCAmelCase : List[str] = termios.tcgetattr(_snake_case ) try: tty.setraw(_snake_case ) lowerCAmelCase : Union[str, Any] = sys.stdin.read(1 ) finally: termios.tcsetattr(_snake_case , termios.TCSADRAIN , _snake_case ) return ch def _snake_case ( ): lowerCAmelCase : Dict = get_raw_chars() if ord(_snake_case ) in [KEYMAP["interrupt"], KEYMAP["newline"]]: return char elif ord(_snake_case ) == KEYMAP["esc"]: lowerCAmelCase : Union[str, Any] = get_raw_chars() if ord(_snake_case ) == KEYMAP["mod_int"]: lowerCAmelCase : Tuple = get_raw_chars() if ord(_snake_case ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_snake_case ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG: return chr(ord(_snake_case ) + ARROW_KEY_FLAG ) else: return KEYMAP["undefined"] else: return get_raw_chars() else: if char in string.printable: return char else: return KEYMAP["undefined"]
60
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) @add_end_docstrings( UpperCamelCase__ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class __A ( UpperCamelCase__ ): def _lowercase (self : str , __a : GenericTensor ): if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ) else: raise ValueError("Unsupported framework" ) return masked_index def _lowercase (self : Tuple , __a : GenericTensor ): UpperCAmelCase_ = self.get_masked_index(__a ) UpperCAmelCase_ = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def _lowercase (self : List[Any] , __a : GenericTensor ): if isinstance(__a , __a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__a ) def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ): if return_tensors is None: UpperCAmelCase_ = self.framework UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a ) self.ensure_exactly_one_mask_token(__a ) return model_inputs def _lowercase (self : str , __a : Optional[int] ): UpperCAmelCase_ = self.model(**__a ) UpperCAmelCase_ = model_inputs["input_ids"] return model_outputs def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: UpperCAmelCase_ = target_ids.shape[0] UpperCAmelCase_ = model_outputs["input_ids"][0] UpperCAmelCase_ = model_outputs["logits"] if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] UpperCAmelCase_ = outputs.numpy() UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = stable_softmax(__a , axis=-1 ) if target_ids is not None: UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) ) UpperCAmelCase_ = tf.expand_dims(__a , 0 ) UpperCAmelCase_ = tf.math.top_k(__a , k=__a ) UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy() else: UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = logits.softmax(dim=-1 ) if target_ids is not None: UpperCAmelCase_ = probs[..., target_ids] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a ) UpperCAmelCase_ = [] UpperCAmelCase_ = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): UpperCAmelCase_ = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place UpperCAmelCase_ = input_ids.numpy().copy() if target_ids is not None: UpperCAmelCase_ = target_ids[p].tolist() UpperCAmelCase_ = p # Filter padding out: UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a ) UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(__a ) result.append(__a ) if single_mask: return result[0] return result def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ): if isinstance(__a , __a ): UpperCAmelCase_ = [targets] try: UpperCAmelCase_ = self.tokenizer.get_vocab() except Exception: UpperCAmelCase_ = {} UpperCAmelCase_ = [] for target in targets: UpperCAmelCase_ = vocab.get(__a , __a ) if id_ is None: UpperCAmelCase_ = self.tokenizer( __a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"] if len(__a ) == 0: logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ "We cannot replace it with anything meaningful, ignoring it" ) continue UpperCAmelCase_ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) UpperCAmelCase_ = list(set(__a ) ) if len(__a ) == 0: raise ValueError("At least one target must be provided when passed." ) UpperCAmelCase_ = np.array(__a ) return target_ids def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ): UpperCAmelCase_ = {} if targets is not None: UpperCAmelCase_ = self.get_target_ids(__a , __a ) UpperCAmelCase_ = target_ids if top_k is not None: UpperCAmelCase_ = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ): UpperCAmelCase_ = super().__call__(__a , **__a ) if isinstance(__a , __a ) and len(__a ) == 1: return outputs[0] return outputs
1
0
"""simple docstring""" from __future__ import annotations from collections import deque class A_ : '''simple docstring''' def __init__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : list[dict] = [] self.adlist.append( {"value": "", "next_states": [], "fail_state": 0, "output": []} ) for keyword in keywords: self.add_keyword(lowercase_ ) self.set_fail_transitions() def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 0 for character in keyword: UpperCAmelCase_ : int = self.find_next_state(lowercase_ , lowercase_ ) if next_state is None: self.adlist.append( { "value": character, "next_states": [], "fail_state": 0, "output": [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) UpperCAmelCase_ : int = len(self.adlist ) - 1 else: UpperCAmelCase_ : Optional[Any] = next_state self.adlist[current_state]["output"].append(lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : deque = deque() for node in self.adlist[0]["next_states"]: q.append(lowercase_ ) UpperCAmelCase_ : Dict = 0 while q: UpperCAmelCase_ : List[Any] = q.popleft() for child in self.adlist[r]["next_states"]: q.append(lowercase_ ) UpperCAmelCase_ : Dict = self.adlist[r]["fail_state"] while ( self.find_next_state(lowercase_ , self.adlist[child]["value"] ) is None and state != 0 ): UpperCAmelCase_ : Dict = self.adlist[state]["fail_state"] UpperCAmelCase_ : Dict = self.find_next_state( lowercase_ , self.adlist[child]["value"] ) if self.adlist[child]["fail_state"] is None: UpperCAmelCase_ : Any = 0 UpperCAmelCase_ : Optional[Any] = ( self.adlist[child]["output"] + self.adlist[self.adlist[child]["fail_state"]]["output"] ) def UpperCamelCase__ ( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : dict = {} # returns a dict with keywords and list of its occurrences UpperCAmelCase_ : Any = 0 for i in range(len(lowercase_ ) ): while ( self.find_next_state(lowercase_ , string[i] ) is None and current_state != 0 ): UpperCAmelCase_ : Any = self.adlist[current_state]["fail_state"] UpperCAmelCase_ : Dict = self.find_next_state(lowercase_ , string[i] ) if next_state is None: UpperCAmelCase_ : Union[str, Any] = 0 else: UpperCAmelCase_ : Dict = next_state for key in self.adlist[current_state]["output"]: if key not in result: UpperCAmelCase_ : Union[str, Any] = [] result[key].append(i - len(lowercase_ ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
61
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__) @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : str a__ : str a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : List[int] a__ : Optional[List[int]] = None a__ : Optional[List[int]] = None a__ : Optional[Union[int, float]] = None a__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __A ( UpperCamelCase__ ): a__ : List[InputFeatures] def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = os.path.join( __a , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , ) UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCAmelCase_ = cached_features_file + ".lock" with FileLock(__a ): if os.path.exists(__a ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCAmelCase_ = torch.load(__a ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCAmelCase_ = ( processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) ) logger.info("Training examples: %s" , len(__a ) ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) logger.info("Saving features into cached file %s" , __a ) torch.save(self.features , __a ) def __len__(self : List[Any] ): return len(self.features ) def __getitem__(self : Any , __a : Optional[Any] ): return self.features[i] def _lowercase (self : Union[str, Any] ): return self.label_list if is_tf_available(): import tensorflow as tf class __A : a__ : List[InputFeatures] def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCAmelCase_ = tf.data.Dataset.from_generator( __a , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowercase (self : int ): return self.dataset def __len__(self : Any ): return len(self.features ) def __getitem__(self : int , __a : Union[str, Any] ): return self.features[i] def _lowercase (self : int ): return self.label_list class __A ( UpperCamelCase__ ): def _lowercase (self : List[Any] , __a : Dict ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" ) def _lowercase (self : Any , __a : List[Any] ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" ) def _lowercase (self : Any ): return ["contradiction", "entailment", "neutral"] def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ): UpperCAmelCase_ = [] for i, line in enumerate(__a ): if i == 0: continue UpperCAmelCase_ = "%s-%s" % (set_type, line[0]) UpperCAmelCase_ = line[5] UpperCAmelCase_ = line[6] UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7] UpperCAmelCase_ = line[0] examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) ) return examples def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )} UpperCAmelCase_ = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d" % (ex_index) ) UpperCAmelCase_ = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0 UpperCAmelCase_ = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE_: int ={ 'hans': 3, } SCREAMING_SNAKE_CASE_: Any ={ 'hans': HansProcessor, }
1
0
import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _A = 16 _A = 32 def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Accelerator , SCREAMING_SNAKE_CASE__ : DatasetDict , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : int = 16 ): __UpperCamelCase =AutoTokenizer.from_pretrained('bert-base-cased' ) __UpperCamelCase =DatasetDict( { 'train': dataset['train'].select(SCREAMING_SNAKE_CASE__ ), 'validation': dataset['train'].select(SCREAMING_SNAKE_CASE__ ), 'test': dataset['validation'], } ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCamelCase =datasets.map( SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase =tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(SCREAMING_SNAKE_CASE__ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCamelCase =1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCamelCase =16 elif accelerator.mixed_precision != "no": __UpperCamelCase =8 else: __UpperCamelCase =None return tokenizer.pad( SCREAMING_SNAKE_CASE__ , padding='longest' , max_length=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_tensors='pt' , ) # Instantiate dataloaders. __UpperCamelCase =DataLoader( tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =DataLoader( tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =DataLoader( tokenized_datasets['test'] , shuffle=SCREAMING_SNAKE_CASE__ , collate_fn=SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader, test_dataloader def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Dict ): # New Code # __UpperCamelCase =[] # Download the dataset __UpperCamelCase =load_dataset('glue' , 'mrpc' ) # Create our splits __UpperCamelCase =StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator __UpperCamelCase =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase =config['lr'] __UpperCamelCase =int(config['num_epochs'] ) __UpperCamelCase =int(config['seed'] ) __UpperCamelCase =int(config['batch_size'] ) __UpperCamelCase =evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation __UpperCamelCase =1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __UpperCamelCase =batch_size // MAX_GPU_BATCH_SIZE __UpperCamelCase =MAX_GPU_BATCH_SIZE set_seed(SCREAMING_SNAKE_CASE__ ) # New Code # # Create our folds: __UpperCamelCase =kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] ) __UpperCamelCase =[] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =get_fold_dataloaders( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase =AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=SCREAMING_SNAKE_CASE__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCamelCase =model.to(accelerator.device ) # Instantiate optimizer __UpperCamelCase =AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE__ ) # Instantiate scheduler __UpperCamelCase =get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__ , num_warmup_steps=1_00 , num_training_steps=(len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =accelerator.prepare( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # Now we train the model for epoch in range(SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =outputs.loss __UpperCamelCase =loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =outputs.logits.argmax(dim=-1 ) __UpperCamelCase , __UpperCamelCase =accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ , ) __UpperCamelCase =metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , SCREAMING_SNAKE_CASE__ ) # New Code # # We also run predictions on the test set at the very end __UpperCamelCase =[] for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase =model(**SCREAMING_SNAKE_CASE__ ) __UpperCamelCase =outputs.logits __UpperCamelCase , __UpperCamelCase =accelerator.gather_for_metrics((predictions, batch['labels']) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: __UpperCamelCase =torch.cat(SCREAMING_SNAKE_CASE__ , dim=0 ) __UpperCamelCase =torch.stack(SCREAMING_SNAKE_CASE__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) __UpperCamelCase =metric.compute(predictions=SCREAMING_SNAKE_CASE__ , references=SCREAMING_SNAKE_CASE__ ) accelerator.print('Average test metrics from all folds:' , SCREAMING_SNAKE_CASE__ ) def _UpperCAmelCase ( ): __UpperCamelCase =argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) # New Code # parser.add_argument('--num_folds' , type=SCREAMING_SNAKE_CASE__ , default=3 , help='The number of splits to perform across the dataset' ) __UpperCamelCase =parser.parse_args() __UpperCamelCase ={'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
62
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Tuple ={} class __A ( UpperCamelCase__ ): a__ : int = """llama""" a__ : Any = ["""past_key_values"""] def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ): UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = hidden_size UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads # for backward compatibility if num_key_value_heads is None: UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = num_key_value_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = initializer_range UpperCAmelCase_ = rms_norm_eps UpperCAmelCase_ = pretraining_tp UpperCAmelCase_ = use_cache UpperCAmelCase_ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def _lowercase (self : List[str] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f"""got {self.rope_scaling}""" ) UpperCAmelCase_ = self.rope_scaling.get("type" , __a ) UpperCAmelCase_ = self.rope_scaling.get("factor" , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
1
0
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def _lowerCamelCase ( lowercase : str , lowercase : str , lowercase : str , lowercase : PreTrainedTokenizer , lowercase : int , lowercase : Optional[int] = None , ) -> List[str]: _a = {} if train_file is not None: _a = [train_file] if eval_file is not None: _a = [eval_file] if test_file is not None: _a = [test_file] _a = datasets.load_dataset("csv" , data_files=lowercase ) _a = list(ds[list(files.keys() )[0]].features.keys() ) _a = features_name.pop(lowercase ) _a = list(set(ds[list(files.keys() )[0]][label_name] ) ) _a = {label: i for i, label in enumerate(lowercase )} _a = tokenizer.model_input_names _a = {} if len(lowercase ) == 1: for k in files.keys(): _a = ds[k].map( lambda lowercase : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=lowercase , max_length=lowercase , padding="max_length" ) , batched=lowercase , ) elif len(lowercase ) == 2: for k in files.keys(): _a = ds[k].map( lambda lowercase : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=lowercase , max_length=lowercase , padding="max_length" , ) , batched=lowercase , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: _a = {k: v for k, v in ex.items() if k in input_names} _a = labelaid[ex[label_name]] yield (d, label) _a = ( tf.data.Dataset.from_generator( lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: _a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) _a = ( tf.data.Dataset.from_generator( lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: _a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) _a = ( tf.data.Dataset.from_generator( lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: _a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase_ : Optional[Any] = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =field(metadata={'help': 'Which column contains the label'} ) __a =field(default=lowerCamelCase_ , metadata={'help': 'The path of the training file'} ) __a =field(default=lowerCamelCase_ , metadata={'help': 'The path of the development file'} ) __a =field(default=lowerCamelCase_ , metadata={'help': 'The path of the test file'} ) __a =field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) __a =field( default=lowerCamelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __a =field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) __a =field( default=lowerCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) __a =field( default=lowerCamelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) __a =field(default=lowerCamelCase_ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __a =field( default=lowerCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) def _lowerCamelCase ( ) -> Any: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) _a , _a , _a = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.info( F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' F'16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) _a , _a , _a , _a = get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) _a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase ) , labelaid=lowercase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): _a = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , ) def compute_metrics(lowercase : EvalPrediction ) -> Dict: _a = np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer _a = TFTrainer( model=lowercase , args=lowercase , train_dataset=lowercase , eval_dataset=lowercase , compute_metrics=lowercase , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _a = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) _a = trainer.evaluate() _a = os.path.join(training_args.output_dir , "eval_results.txt" ) with open(lowercase , "w" ) as writer: logger.info("***** Eval results *****" ) for key, value in result.items(): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) results.update(lowercase ) return results if __name__ == "__main__": main()
63
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def _lowercase (self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase (self : str ): UpperCAmelCase_ = 1 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def _lowercase (self : int ): torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _lowercase (self : Any ): torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _lowercase (self : Optional[Any] ): torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__a ) def _lowercase (self : Any ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] UpperCAmelCase_ = image[0, -3:, -3:, -1] UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase (self : Optional[int] ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _lowercase (self : str ): UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 UpperCAmelCase_ = unet.half() UpperCAmelCase_ = text_encoder.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : List[Any] ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def _lowercase (self : Tuple ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _lowercase (self : List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) UpperCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
1
0
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class lowercase: '''simple docstring''' def __init__( self: Union[str, Any], a_: Optional[Any], a_: int, a_: int ): '''simple docstring''' if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""" ) _snake_case : Dict = img _snake_case : Union[str, Any] = img.shape[1] _snake_case : int = img.shape[0] _snake_case : int = dst_width _snake_case : Tuple = dst_height _snake_case : Any = self.src_w / self.dst_w _snake_case : Union[str, Any] = self.src_h / self.dst_h _snake_case : Optional[int] = ( np.ones((self.dst_h, self.dst_w, 3), np.uinta ) * 255 ) def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' for i in range(self.dst_h ): for j in range(self.dst_w ): _snake_case : Dict = self.img[self.get_y(a_ )][self.get_x(a_ )] def UpperCamelCase_ ( self: List[str], a_: int ): '''simple docstring''' return int(self.ratio_x * x ) def UpperCamelCase_ ( self: Optional[Any], a_: int ): '''simple docstring''' return int(self.ratio_y * y ) if __name__ == "__main__": A_ , A_ = 8_00, 6_00 A_ = imread('''image_data/lena.jpg''', 1) A_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output ) waitKey(0) destroyAllWindows()
64
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __A ( UpperCamelCase__ ): def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ): UpperCAmelCase_ = 1.0 if scale is None else scale UpperCAmelCase_ = 0.0 if loc is None else loc super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] ) @property def _lowercase (self : Union[str, Any] ): return self.base_dist.mean * self.scale + self.loc @property def _lowercase (self : List[Any] ): return self.base_dist.variance * self.scale**2 @property def _lowercase (self : List[Any] ): return self.variance.sqrt() class __A ( nn.Module ): def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ): super().__init__(**__a ) UpperCAmelCase_ = args_dim UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] ) UpperCAmelCase_ = domain_map def _lowercase (self : List[str] , __a : torch.Tensor ): UpperCAmelCase_ = [proj(__a ) for proj in self.proj] return self.domain_map(*__a ) class __A ( nn.Module ): def __init__(self : Union[str, Any] , __a : List[str] ): super().__init__() UpperCAmelCase_ = function def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ): return self.function(__a , *__a ) class __A : a__ : type a__ : int a__ : Dict[str, int] def __init__(self : List[Any] , __a : int = 1 ): UpperCAmelCase_ = dim UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim} def _lowercase (self : Any , __a : Any ): if self.dim == 1: return self.distribution_class(*__a ) else: return Independent(self.distribution_class(*__a ) , 1 ) def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ): UpperCAmelCase_ = self._base_distribution(__a ) if loc is None and scale is None: return distr else: return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim ) @property def _lowercase (self : Any ): return () if self.dim == 1 else (self.dim,) @property def _lowercase (self : Dict ): return len(self.event_shape ) @property def _lowercase (self : Tuple ): return 0.0 def _lowercase (self : List[str] , __a : int ): return ParameterProjection( in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _lowercase (self : Optional[int] , *__a : torch.Tensor ): raise NotImplementedError() @staticmethod def _lowercase (__a : torch.Tensor ): return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0 class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} a__ : type = StudentT @classmethod def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCAmelCase_ = 2.0 + cls.squareplus(__a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"loc": 1, "scale": 1} a__ : type = Normal @classmethod def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"total_count": 1, "logits": 1} a__ : type = NegativeBinomial @classmethod def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _lowercase (self : List[str] , __a : str ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if self.dim == 1: return self.distribution_class(total_count=__a , logits=__a ) else: return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 ) def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
1
0
import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor UpperCamelCase__ = logging.get_logger(__name__) class A ( UpperCAmelCase_ ): def __init__(self : Any , *__UpperCAmelCase : Any , **__UpperCAmelCase : Tuple ) -> None: """simple docstring""" warnings.warn( "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use GLPNImageProcessor instead." , __UpperCAmelCase , ) super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
65
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def _lowercase (self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ): UpperCAmelCase_ = 0.0 for i, j in zip(__a , __a ): n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0 UpperCAmelCase_ = n_correct / len(__a ) return { "accuracy": accuracy, }
1
0
"""simple docstring""" def A_ ( _lowercase ): '''simple docstring''' return [ txt[:a] + txt[a].upper() + txt[a + 1 :] for a in range(len(_lowercase ) ) if txt[a].isalpha() ] if __name__ == "__main__": __import__("doctest").testmod()
66
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]: '''simple docstring''' model.train() UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict: '''simple docstring''' set_seed(42 ) UpperCAmelCase_ = RegressionModel() UpperCAmelCase_ = deepcopy(snake_case_ ) UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( snake_case_ : Any ) -> int: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] GradientState._reset_state() def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ )) if accelerator.num_processes > 1: check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ = RegressionDataset(length=96 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if iteration < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if batch_num < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(snake_case_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(snake_case_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(snake_case_ , snake_case_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Dict ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
1
0
'''simple docstring''' from __future__ import annotations def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , ) -> tuple[int, float, str]: __lowerCamelCase = cipher_alphabet or [chr(UpperCamelCase__ ) for i in range(97 , 1_23 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) __lowerCamelCase = { '''a''': 0.0_8_4_9_7, '''b''': 0.0_1_4_9_2, '''c''': 0.0_2_2_0_2, '''d''': 0.0_4_2_5_3, '''e''': 0.1_1_1_6_2, '''f''': 0.0_2_2_2_8, '''g''': 0.0_2_0_1_5, '''h''': 0.0_6_0_9_4, '''i''': 0.0_7_5_4_6, '''j''': 0.0_0_1_5_3, '''k''': 0.0_1_2_9_2, '''l''': 0.0_4_0_2_5, '''m''': 0.0_2_4_0_6, '''n''': 0.0_6_7_4_9, '''o''': 0.0_7_5_0_7, '''p''': 0.0_1_9_2_9, '''q''': 0.0_0_0_9_5, '''r''': 0.0_7_5_8_7, '''s''': 0.0_6_3_2_7, '''t''': 0.0_9_3_5_6, '''u''': 0.0_2_7_5_8, '''v''': 0.0_0_9_7_8, '''w''': 0.0_2_5_6_0, '''x''': 0.0_0_1_5_0, '''y''': 0.0_1_9_9_4, '''z''': 0.0_0_0_7_7, } else: # Custom frequencies dictionary __lowerCamelCase = frequencies_dict if not case_sensitive: __lowerCamelCase = ciphertext.lower() # Chi squared statistic values __lowerCamelCase = {} # cycle through all of the shifts for shift in range(len(UpperCamelCase__ ) ): __lowerCamelCase = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet __lowerCamelCase = (alphabet_letters.index(letter.lower() ) - shift) % len( UpperCamelCase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter __lowerCamelCase = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: __lowerCamelCase = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message __lowerCamelCase = decrypted_with_shift.lower().count(UpperCamelCase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowerCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowerCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message __lowerCamelCase = decrypted_with_shift.count(UpperCamelCase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies __lowerCamelCase = frequencies[letter] * occurrences # Complete the chi squared statistic formula __lowerCamelCase = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary __lowerCamelCase = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(UpperCamelCase__ ) -> tuple[float, str]: return chi_squared_statistic_values[key] __lowerCamelCase = min( UpperCamelCase__ , key=UpperCamelCase__ , ) # Get all the data from the most likely cipher (key, decoded message) ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
67
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(snake_case_ , x % y ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int: '''simple docstring''' UpperCAmelCase_ = 1 for i in range(1 , n + 1 ): UpperCAmelCase_ = lcm(snake_case_ , snake_case_ ) return g if __name__ == "__main__": print(f"{solution() = }")
1
0
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") lowerCAmelCase__ = {"""target_lang""": """fi""", """source_lang""": """en"""} lowerCAmelCase__ = """>>zh<<""" lowerCAmelCase__ = """Helsinki-NLP/""" if is_torch_available(): lowerCAmelCase__ = """pt""" elif is_tf_available(): lowerCAmelCase__ = """tf""" else: lowerCAmelCase__ = """jax""" @require_sentencepiece class a__ ( snake_case , unittest.TestCase ): """simple docstring""" __lowerCamelCase = MarianTokenizer __lowerCamelCase = False __lowerCamelCase = True def UpperCamelCase ( self ) -> int: '''simple docstring''' super().setUp() A__ = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] A__ = dict(zip(lowercase , range(len(lowercase ) ) ) ) A__ = Path(self.tmpdirname ) save_json(lowercase , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(lowercase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(lowercase , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(lowercase , save_dir / VOCAB_FILES_NAMES["target_spm"] ) A__ = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self , **lowercase ) -> MarianTokenizer: '''simple docstring''' return MarianTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def UpperCamelCase ( self , lowercase ) -> str: '''simple docstring''' return ( "This is a test", "This is a test", ) def UpperCamelCase ( self ) -> Tuple: '''simple docstring''' A__ = "</s>" A__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(lowercase ) , 9 ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def UpperCamelCase ( self ) -> Optional[int]: '''simple docstring''' A__ = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' ) A__ = en_de_tokenizer(["I am a small frog"] , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) A__ = [38, 121, 14, 697, 38848, 0] self.assertListEqual(lowercase , batch.input_ids[0] ) A__ = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(lowercase ) A__ = [x.name for x in Path(lowercase ).glob("*" )] self.assertIn("source.spm" , lowercase ) MarianTokenizer.from_pretrained(lowercase ) def UpperCamelCase ( self ) -> Union[str, Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tok( ["I am a small frog" * 1000, "I am a small frog"] , padding=lowercase , truncation=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = self.get_tokenizer() A__ = tok(["I am a tiny frog", "I am a small frog"] , padding=lowercase , return_tensors=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = {"input_ids": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def UpperCamelCase ( self ) -> str: '''simple docstring''' A__ = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) A__ = "Tämä on testi" A__ = "This is a test" A__ = [76, 7, 2047, 2] A__ = [69, 12, 11, 940, 2] A__ = tokenizer(lowercase ).input_ids self.assertListEqual(lowercase , lowercase ) A__ = tokenizer(text_target=lowercase ).input_ids self.assertListEqual(lowercase , lowercase ) A__ = tokenizer.decode(lowercase , skip_special_tokens=lowercase ) self.assertEqual(lowercase , lowercase )
68
'''simple docstring''' import os from math import logaa def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int: '''simple docstring''' UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ): UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) ) if x * logaa(snake_case_ ) > largest: UpperCAmelCase_ = x * logaa(snake_case_ ) UpperCAmelCase_ = i + 1 return result if __name__ == "__main__": print(solution())
1
0
"""simple docstring""" from __future__ import annotations from collections.abc import Generator def UpperCAmelCase ( ) -> Generator[int, None, None]: snake_case_ = {} snake_case_ = 2 while True: snake_case_ = factor_map.pop(UpperCAmelCase , UpperCAmelCase ) if factor: snake_case_ = factor + prime while x in factor_map: x += factor snake_case_ = factor else: snake_case_ = prime yield prime prime += 1 def UpperCAmelCase ( UpperCAmelCase = 1e10 ) -> int: snake_case_ = sieve() snake_case_ = 1 while True: snake_case_ = next(UpperCAmelCase ) if (2 * prime * n) > limit: return n # Ignore the next prime as the reminder will be 2. next(UpperCAmelCase ) n += 2 if __name__ == "__main__": print(solution())
69
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = checkpoint UpperCAmelCase_ = {} UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ ) } for i in range(snake_case_ ): UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""" ) UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""" ) UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) for i in range(snake_case_ ): UpperCAmelCase_ = num_up_blocks - 1 - i UpperCAmelCase_ = [ key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) return new_checkpoint def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict: '''simple docstring''' UpperCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ = io.BytesIO(r.content ) UpperCAmelCase_ = OmegaConf.load(snake_case_ ) UpperCAmelCase_ = 5_12 UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ = {} with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ = f.get_tensor(snake_case_ ) else: UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"] # Convert the VAE model. UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ ) UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ ) UpperCAmelCase_ = AutoencoderKL(**snake_case_ ) vae.load_state_dict(snake_case_ ) vae.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') SCREAMING_SNAKE_CASE_: str =parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
1
0
'''simple docstring''' import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class UpperCAmelCase ( snake_case_ ): def __init__( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : Union[str, Any]=13 , __snake_case : List[str]=7 , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=True , __snake_case : str=False , __snake_case : str=True , __snake_case : Union[str, Any]=99 , __snake_case : Union[str, Any]=32 , __snake_case : Optional[int]=5 , __snake_case : Dict=4 , __snake_case : Union[str, Any]=37 , __snake_case : Tuple="gelu" , __snake_case : Tuple=0.1 , __snake_case : List[str]=0.1 , __snake_case : Optional[Any]=5_12 , __snake_case : Tuple=16 , __snake_case : Optional[int]=2 , __snake_case : Tuple=0.02 , __snake_case : Optional[int]=3 , __snake_case : str=4 , __snake_case : Any=None , ) -> Union[str, Any]: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope def lowercase__ ( self : Tuple ) -> Optional[Any]: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None _lowerCAmelCase = None _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _lowerCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : List[str] ) -> Tuple: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def lowercase__ ( self : str , __snake_case : str , __snake_case : List[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : str ) -> Any: _lowerCAmelCase = DistilBertModel(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case , __snake_case ) _lowerCAmelCase = model(__snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase__ ( self : int , __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Tuple , __snake_case : Tuple ) -> Optional[int]: _lowerCAmelCase = DistilBertForMaskedLM(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Union[str, Any] , __snake_case : str , __snake_case : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : Any , __snake_case : Any ) -> int: _lowerCAmelCase = DistilBertForQuestionAnswering(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model( __snake_case , attention_mask=__snake_case , start_positions=__snake_case , end_positions=__snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Optional[int] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] ) -> Union[str, Any]: _lowerCAmelCase = self.num_labels _lowerCAmelCase = DistilBertForSequenceClassification(__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Optional[Any] , __snake_case : List[Any] , __snake_case : Dict , __snake_case : List[str] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : Tuple ) -> str: _lowerCAmelCase = self.num_labels _lowerCAmelCase = DistilBertForTokenClassification(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = model(__snake_case , attention_mask=__snake_case , labels=__snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : Optional[Any] , __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Tuple ) -> int: _lowerCAmelCase = self.num_choices _lowerCAmelCase = DistilBertForMultipleChoice(config=__snake_case ) model.to(__snake_case ) model.eval() _lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _lowerCAmelCase = model( __snake_case , attention_mask=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : str ) -> Optional[Any]: _lowerCAmelCase = self.prepare_config_and_inputs() ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) = config_and_inputs _lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase ( snake_case_ , snake_case_ , unittest.TestCase ): _lowercase: str = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _lowercase: Dict = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) _lowercase: Any = True _lowercase: int = True _lowercase: Union[str, Any] = True _lowercase: List[Any] = True def lowercase__ ( self : Dict ) -> Optional[Any]: _lowerCAmelCase = DistilBertModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=__snake_case , dim=37 ) def lowercase__ ( self : List[str] ) -> Optional[Any]: self.config_tester.run_common_tests() def lowercase__ ( self : str ) -> Any: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__snake_case ) def lowercase__ ( self : Tuple ) -> Tuple: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__snake_case ) def lowercase__ ( self : List[Any] ) -> str: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__snake_case ) def lowercase__ ( self : str ) -> List[str]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*__snake_case ) def lowercase__ ( self : int ) -> Optional[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__snake_case ) def lowercase__ ( self : Any ) -> Union[str, Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__snake_case ) @slow def lowercase__ ( self : List[str] ) -> List[Any]: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = DistilBertModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) @slow @require_torch_gpu def lowercase__ ( self : List[Any] ) -> Tuple: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return _lowerCAmelCase = True _lowerCAmelCase = model_class(config=__snake_case ) _lowerCAmelCase = self._prepare_for_class(__snake_case , __snake_case ) _lowerCAmelCase = torch.jit.trace( __snake_case , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__snake_case , os.path.join(__snake_case , """traced_model.pt""" ) ) _lowerCAmelCase = torch.jit.load(os.path.join(__snake_case , """traced_model.pt""" ) , map_location=__snake_case ) loaded(inputs_dict["""input_ids"""].to(__snake_case ) , inputs_dict["""attention_mask"""].to(__snake_case ) ) @require_torch class UpperCAmelCase ( unittest.TestCase ): @slow def lowercase__ ( self : Any ) -> List[str]: _lowerCAmelCase = DistilBertModel.from_pretrained("""distilbert-base-uncased""" ) _lowerCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) _lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): _lowerCAmelCase = model(__snake_case , attention_mask=__snake_case )[0] _lowerCAmelCase = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , __snake_case ) _lowerCAmelCase = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __snake_case , atol=1E-4 ) )
70
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __A ( unittest.TestCase ): def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 1 def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase (self : Dict , __a : Any , __a : List[Any] ): UpperCAmelCase_ = FlaxViTModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (self.image_size, self.image_size) UpperCAmelCase_ = (self.patch_size, self.patch_size) UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase (self : Tuple , __a : str , __a : Any ): UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = FlaxViTForImageClassification(config=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = FlaxViTForImageClassification(__a ) UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(__a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase (self : Any ): UpperCAmelCase_ = FlaxViTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(__a , __a ) UpperCAmelCase_ = model_class(__a ) @jax.jit def model_jitted(__a : Tuple , **__a : List[Any] ): return model(pixel_values=__a , **__a ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase (self : Tuple ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__a )
1
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A_ :List[str] = { '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''], '''tokenization_xlm''': ['''XLMTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ :Tuple = [ '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMForMultipleChoice''', '''XLMForQuestionAnswering''', '''XLMForQuestionAnsweringSimple''', '''XLMForSequenceClassification''', '''XLMForTokenClassification''', '''XLMModel''', '''XLMPreTrainedModel''', '''XLMWithLMHeadModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ :Dict = [ '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMForMultipleChoice''', '''TFXLMForQuestionAnsweringSimple''', '''TFXLMForSequenceClassification''', '''TFXLMForTokenClassification''', '''TFXLMMainLayer''', '''TFXLMModel''', '''TFXLMPreTrainedModel''', '''TFXLMWithLMHeadModel''', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys A_ :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
71
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 5 # Realm tok UpperCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(__a , exist_ok=__a ) def _lowercase (self : Optional[Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def _lowercase (self : Any ): shutil.rmtree(self.tmpdirname ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase (self : List[str] ): UpperCAmelCase_ = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase (self : Any ): UpperCAmelCase_ = np.array( [ B"This is the first record", B"This is the second record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=__a , ) return block_records def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _lowercase (self : int ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual([False, True, True] , __a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: UpperCAmelCase_ = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
1
0
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __snake_case ( _lowercase): snake_case__ : str = ["image_processor", "tokenizer"] snake_case__ : int = "LayoutLMv2ImageProcessor" snake_case__ : Dict = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast") def __init__( self : int , __lowerCAmelCase : List[str]=None , __lowerCAmelCase : List[Any]=None , **__lowerCAmelCase : str ): """simple docstring""" if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __lowerCAmelCase , ) _lowerCamelCase : int = kwargs.pop('''feature_extractor''' ) _lowerCamelCase : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__lowerCAmelCase , __lowerCAmelCase ) def __call__( self : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , __lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : List[str] , ): """simple docstring""" if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( '''You cannot provide bounding boxes ''' '''if you initialized the image processor with apply_ocr set to True.''' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' ) if return_overflowing_tokens is True and return_offsets_mapping is False: raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' ) # first, apply the image processor _lowerCamelCase : Dict = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(__lowerCAmelCase , __lowerCAmelCase ): _lowerCamelCase : List[str] = [text] # add batch dimension (as the image processor always adds a batch dimension) _lowerCamelCase : Tuple = features['''words'''] _lowerCamelCase : List[Any] = self.tokenizer( text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , ) # add pixel values _lowerCamelCase : Dict = features.pop('''pixel_values''' ) if return_overflowing_tokens is True: _lowerCamelCase : int = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] ) _lowerCamelCase : str = images return encoded_inputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] ): """simple docstring""" _lowerCamelCase : Dict = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(__lowerCAmelCase ) != len(__lowerCAmelCase ): raise ValueError( '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got''' f''' {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}''' ) return images_with_overflow def SCREAMING_SNAKE_CASE ( self : int , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : Optional[Any] ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : str , *__lowerCAmelCase : Union[str, Any] , **__lowerCAmelCase : Optional[int] ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Dict ): """simple docstring""" return ["input_ids", "bbox", "attention_mask", "image"] @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , ) return self.image_processor_class @property def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , ) return self.image_processor
72
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K) def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float: '''simple docstring''' if donor_conc <= 0: raise ValueError("Donor concentration should be positive" ) elif acceptor_conc <= 0: raise ValueError("Acceptor concentration should be positive" ) elif intrinsic_conc <= 0: raise ValueError("Intrinsic concentration should be positive" ) elif donor_conc <= intrinsic_conc: raise ValueError( "Donor concentration should be greater than intrinsic concentration" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( "Acceptor concentration should be greater than intrinsic concentration" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
from math import isqrt def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool: return all(number % divisor != 0 for divisor in range(2 , isqrt(lowerCamelCase__ ) + 1 ) ) def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0**6 ) -> int: __lowerCamelCase : List[str] = 0 __lowerCamelCase : List[str] = 1 __lowerCamelCase : Optional[int] = 7 while prime_candidate < max_prime: primes_count += is_prime(lowerCamelCase__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(F"""{solution() = }""")
73
'''simple docstring''' import math def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase_ = input("Enter message: " ) UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) ) UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " ) if mode.lower().startswith("e" ): UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ ) elif mode.lower().startswith("d" ): UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"""Output:\n{text + "|"}""" ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = [""] * key for col in range(snake_case_ ): UpperCAmelCase_ = col while pointer < len(snake_case_ ): cipher_text[col] += message[pointer] pointer += key return "".join(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key ) UpperCAmelCase_ = key UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ ) UpperCAmelCase_ = [""] * num_cols UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase_ = 0 row += 1 return "".join(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
1
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
74
'''simple docstring''' import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger() SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] , __a : str ): os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = {"source": "What is love ?", "target": "life"} UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f: f.write(__a ) def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = os.path.join(__a , "output" ) UpperCAmelCase_ = os.path.join(__a , "data" ) self._create_dummy_data(data_dir=__a ) UpperCAmelCase_ = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__a , env=self.get_env() ) UpperCAmelCase_ = os.path.join(__a , "metrics.json" ) with open(__a ) as f: UpperCAmelCase_ = json.load(__a ) return result @require_torch_gpu def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def _lowercase (self : Dict ): UpperCAmelCase_ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def _lowercase (self : Any ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
1
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class __UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase, lowerCAmelCase=7, lowerCAmelCase=3, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=True, lowerCAmelCase=[0.5, 0.5, 0.5], lowerCAmelCase=[0.5, 0.5, 0.5], lowerCAmelCase=True, lowerCAmelCase=1 / 255, lowerCAmelCase=True, ): """simple docstring""" lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} lowerCamelCase_ =parent lowerCamelCase_ =batch_size lowerCamelCase_ =num_channels lowerCamelCase_ =min_resolution lowerCamelCase_ =max_resolution lowerCamelCase_ =do_resize lowerCamelCase_ =size lowerCamelCase_ =do_normalize lowerCamelCase_ =image_mean lowerCamelCase_ =image_std lowerCamelCase_ =do_rescale lowerCamelCase_ =rescale_factor lowerCamelCase_ =do_pad def lowercase__ ( self ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ): """simple docstring""" if not batched: lowerCamelCase_ =image_inputs[0] if isinstance(lowerCAmelCase, Image.Image ): lowerCamelCase_, lowerCamelCase_ =image.size else: lowerCamelCase_, lowerCamelCase_ =image.shape[1], image.shape[2] if w < h: lowerCamelCase_ =int(self.size['''shortest_edge'''] * h / w ) lowerCamelCase_ =self.size['''shortest_edge'''] elif w > h: lowerCamelCase_ =self.size['''shortest_edge'''] lowerCamelCase_ =int(self.size['''shortest_edge'''] * w / h ) else: lowerCamelCase_ =self.size['''shortest_edge'''] lowerCamelCase_ =self.size['''shortest_edge'''] else: lowerCamelCase_ =[] for image in image_inputs: lowerCamelCase_, lowerCamelCase_ =self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[0] )[0] lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ): lowercase : List[str] =DetaImageProcessor if is_vision_available() else None def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =DetaImageProcessingTester(self ) @property def lowercase__ ( self ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_rescale''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''do_pad''' ) ) self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad, lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" pass def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, Image.Image ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase ) lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, np.ndarray ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase, torch.Tensor ) # Test not batched input lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase ) self.assertEqual( encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), ) # Test batched lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase ) self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ), ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''', '''r''' ) as f: lowerCamelCase_ =json.loads(f.read() ) lowerCamelCase_ ={'''image_id''': 39_769, '''annotations''': target} # encode them lowerCamelCase_ =DetaImageProcessor() lowerCamelCase_ =image_processing(images=lowerCAmelCase, annotations=lowerCAmelCase, return_tensors='''pt''' ) # verify pixel values lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) ) # verify area lowerCamelCase_ =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) ) # verify boxes lowerCamelCase_ =torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) ) # verify image_id lowerCamelCase_ =torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) ) # verify is_crowd lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) ) # verify class_labels lowerCamelCase_ =torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) ) # verify orig_size lowerCamelCase_ =torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) ) # verify size lowerCamelCase_ =torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) ) @slow def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''', '''r''' ) as f: lowerCamelCase_ =json.loads(f.read() ) lowerCamelCase_ ={'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} lowerCamelCase_ =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowerCamelCase_ =DetaImageProcessor(format='''coco_panoptic''' ) lowerCamelCase_ =image_processing(images=lowerCAmelCase, annotations=lowerCAmelCase, masks_path=lowerCAmelCase, return_tensors='''pt''' ) # verify pixel values lowerCamelCase_ =torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3], lowerCAmelCase, atol=1e-4 ) ) # verify area lowerCamelCase_ =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''], lowerCAmelCase ) ) # verify boxes lowerCamelCase_ =torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape, lowerCAmelCase ) lowerCamelCase_ =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0], lowerCAmelCase, atol=1e-3 ) ) # verify image_id lowerCamelCase_ =torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''], lowerCAmelCase ) ) # verify is_crowd lowerCamelCase_ =torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''], lowerCAmelCase ) ) # verify class_labels lowerCamelCase_ =torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''], lowerCAmelCase ) ) # verify masks lowerCamelCase_ =822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item(), lowerCAmelCase ) # verify orig_size lowerCamelCase_ =torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''], lowerCAmelCase ) ) # verify size lowerCamelCase_ =torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''], lowerCAmelCase ) )
75
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time SCREAMING_SNAKE_CASE_: Optional[int] =Lock() def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(snake_case_ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() UpperCAmelCase_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left UpperCAmelCase_ = min(snake_case_ , snake_case_ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(snake_case_ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() UpperCAmelCase_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right UpperCAmelCase_ = max(snake_case_ , snake_case_ ) # after all swaps are performed, send the values back to main result_pipe[1].send(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr for i in range(1 , len(snake_case_ ) - 1 ): UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr process_array_.append( Process( target=snake_case_ , args=( len(snake_case_ ) - 1, arr[len(snake_case_ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(snake_case_ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(snake_case_ ) ): UpperCAmelCase_ = result_pipe[p][0].recv() process_array_[p].join() return arr def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*snake_case_ ) UpperCAmelCase_ = odd_even_transposition(snake_case_ ) print("Sorted List\n" ) print(*snake_case_ ) if __name__ == "__main__": main()
1
0
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: a_ = None a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } a_ = { 'facebook/mbart-large-en-ro': 1024, 'facebook/mbart-large-cc25': 1024, } # fmt: off a_ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ =VOCAB_FILES_NAMES lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ =['input_ids', 'attention_mask'] lowerCamelCase__ =MBartTokenizer lowerCamelCase__ =[] lowerCamelCase__ =[] def __init__( self : List[Any] , a : Optional[Any]=None , a : Optional[int]=None , a : Optional[int]="<s>" , a : Dict="</s>" , a : int="</s>" , a : Any="<s>" , a : List[str]="<unk>" , a : Any="<pad>" , a : List[str]="<mask>" , a : Optional[int]=None , a : Optional[int]=None , a : List[Any]=None , **a : Tuple , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token super().__init__( vocab_file=a , tokenizer_file=a , bos_token=a , eos_token=a , sep_token=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , src_lang=a , tgt_lang=a , additional_special_tokens=a , **a , ) SCREAMING_SNAKE_CASE : Any = vocab_file SCREAMING_SNAKE_CASE : List[Any] = False if not self.vocab_file else True SCREAMING_SNAKE_CASE : List[str] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) SCREAMING_SNAKE_CASE : str = { lang_code: self.convert_tokens_to_ids(a ) for lang_code in FAIRSEQ_LANGUAGE_CODES } SCREAMING_SNAKE_CASE : Any = src_lang if src_lang is not None else "en_XX" SCREAMING_SNAKE_CASE : Optional[Any] = self.convert_tokens_to_ids(self._src_lang ) SCREAMING_SNAKE_CASE : Optional[Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def __UpperCamelCase ( self : List[Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def __UpperCamelCase ( self : List[Any] , a : str ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __UpperCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __UpperCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __UpperCamelCase ( self : List[str] , a : Optional[Any] , a : str , a : Optional[str] , a : Optional[str] , **a : Optional[Any] ) -> str: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) SCREAMING_SNAKE_CASE : List[str] = src_lang SCREAMING_SNAKE_CASE : List[Any] = self(a , add_special_tokens=a , return_tensors=a , **a ) SCREAMING_SNAKE_CASE : List[Any] = self.convert_tokens_to_ids(a ) SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang_id return inputs def __UpperCamelCase ( self : Any , a : List[str] , a : str = "en_XX" , a : Optional[List[str]] = None , a : str = "ro_RO" , **a : Tuple , ) -> BatchEncoding: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = src_lang SCREAMING_SNAKE_CASE : Any = tgt_lang return super().prepare_seqaseq_batch(a , a , **a ) def __UpperCamelCase ( self : str ) -> Dict: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def __UpperCamelCase ( self : Any , a : Optional[Any] ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE : str = self.convert_tokens_to_ids(a ) SCREAMING_SNAKE_CASE : Optional[int] = [] SCREAMING_SNAKE_CASE : Optional[int] = [self.eos_token_id, self.cur_lang_code] SCREAMING_SNAKE_CASE : str = self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE : int = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase ( self : str , a : str ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = self.convert_tokens_to_ids(a ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Optional[Any] = [self.eos_token_id, self.cur_lang_code] SCREAMING_SNAKE_CASE : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) SCREAMING_SNAKE_CASE : Optional[int] = self.convert_ids_to_tokens(self.suffix_tokens ) SCREAMING_SNAKE_CASE : str = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def __UpperCamelCase ( self : str , a : str , a : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(a ): logger.error(F"Vocabulary path ({save_directory}) should be a directory." ) return SCREAMING_SNAKE_CASE : str = os.path.join( a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ): copyfile(self.vocab_file , a ) return (out_vocab_file,)
76
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b" UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) ) return "0b" + "".join( str(int("1" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
"""simple docstring""" # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class UpperCAmelCase_ ( _a): def __init__( self , a , a ) -> List[Any]: super().__init__() self.register_modules(unet=a , scheduler=a ) @torch.no_grad() def __call__( self , a = 1 , a = None , a = 5_0 , a = "pil" , a = True , **a , ) -> Union[ImagePipelineOutput, Tuple]: lowercase__ : Union[str, Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=a , ) lowercase__ : Tuple = image.to(self.device ) # set step values self.scheduler.set_timesteps(a ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowercase__ : Dict = self.unet(a , a ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowercase__ : Optional[int] = self.scheduler.step(a , a , a ).prev_sample lowercase__ : Dict = (image / 2 + 0.5).clamp(0 , 1 ) lowercase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ : Union[str, Any] = self.numpy_to_pil(a ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=a ), "This is a local test"
77
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ) -> None: '''simple docstring''' if start is None: UpperCAmelCase_ = 0 if end is None: UpperCAmelCase_ = len(snake_case_ ) - 1 if start >= end: return UpperCAmelCase_ = (start + end) // 2 slowsort(snake_case_ , snake_case_ , snake_case_ ) slowsort(snake_case_ , mid + 1 , snake_case_ ) if sequence[end] < sequence[mid]: UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[end] slowsort(snake_case_ , snake_case_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
1
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = { """facebook/dpr-ctx_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-single-nq-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-reader-single-nq-base""": ( """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json""" ), """facebook/dpr-ctx_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-question_encoder-multiset-base""": ( """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json""" ), """facebook/dpr-reader-multiset-base""": ( """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json""" ), } class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = """dpr""" def __init__( self :Optional[Any] , lowercase_ :Tuple=3_05_22 , lowercase_ :Optional[int]=7_68 , lowercase_ :List[str]=12 , lowercase_ :Optional[int]=12 , lowercase_ :Union[str, Any]=30_72 , lowercase_ :int="gelu" , lowercase_ :int=0.1 , lowercase_ :int=0.1 , lowercase_ :int=5_12 , lowercase_ :Union[str, Any]=2 , lowercase_ :str=0.02 , lowercase_ :Optional[int]=1E-12 , lowercase_ :List[str]=0 , lowercase_ :Any="absolute" , lowercase_ :int = 0 , **lowercase_ :Dict , ) -> int: super().__init__(pad_token_id=lowercase_ , **lowercase_ ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = hidden_act UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = projection_dim UpperCAmelCase = position_embedding_type
78
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __A ( UpperCamelCase__ ): a__ : Optional[Any] = DistilBertTokenizer a__ : Any = DistilBertTokenizerFast a__ : str = True @slow def _lowercase (self : int ): UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
1
0
'''simple docstring''' import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def __lowercase ( __lowercase ) -> Dict: '''simple docstring''' _A = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(__lowercase , __lowercase ) def __lowercase ( __lowercase ) -> List[Any]: '''simple docstring''' _A = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: _A = s_dict.pop(__lowercase ) elif "subsample" in key: _A = s_dict.pop(__lowercase ) def __lowercase ( __lowercase ) -> Tuple: '''simple docstring''' _A , _A = emb.weight.shape _A = nn.Linear(__lowercase , __lowercase , bias=__lowercase ) _A = emb.weight.data return lin_layer def __lowercase ( __lowercase , __lowercase ) -> List[Any]: '''simple docstring''' _A = torch.load(__lowercase , map_location="cpu" ) _A = mam_aaa["args"] _A = mam_aaa["model"] _A = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(__lowercase ) rename_keys(__lowercase ) _A = state_dict["decoder.embed_tokens.weight"].shape[0] _A = args.share_decoder_input_output_embed _A = [int(__lowercase ) for i in args.conv_kernel_sizes.split("," )] _A = SpeechaTextConfig( vocab_size=__lowercase , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(__lowercase ) , conv_channels=args.conv_channels , conv_kernel_sizes=__lowercase , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=__lowercase , num_beams=5 , max_length=200 , use_cache=__lowercase , decoder_start_token_id=2 , early_stopping=__lowercase , ) _A = SpeechaTextForConditionalGeneration(__lowercase ) _A , _A = model.model.load_state_dict(__lowercase , strict=__lowercase ) if len(__lowercase ) > 0 and not set(__lowercase ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F''' but all the following weights are missing {missing}''' ) if tie_embeds: _A = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _A = lm_head_weights model.save_pretrained(__lowercase ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('''--fairseq_path''', type=str, help='''Path to the fairseq model (.pt) file.''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCamelCase_ = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
79
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) SCREAMING_SNAKE_CASE_: Tuple =[] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val def lowerCAmelCase_ ( snake_case_ : int ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCAmelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) UpperCAmelCase_ = value else: UpperCAmelCase_ = value return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "" if is_panoptic: UpperCAmelCase_ = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:2_56, :] UpperCAmelCase_ = in_proj_bias[:2_56] UpperCAmelCase_ = in_proj_weight[2_56:5_12, :] UpperCAmelCase_ = in_proj_bias[2_56:5_12] UpperCAmelCase_ = in_proj_weight[-2_56:, :] UpperCAmelCase_ = in_proj_bias[-2_56:] def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCAmelCase_ = "resnet101" if "dc5" in model_name: UpperCAmelCase_ = True UpperCAmelCase_ = "panoptic" in model_name if is_panoptic: UpperCAmelCase_ = 2_50 else: UpperCAmelCase_ = 91 UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "coco-detection-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} # load image processor UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection" UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ ) # prepare image UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" ) UpperCAmelCase_ = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval() UpperCAmelCase_ = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCAmelCase_ = "conditional_detr." + src rename_key(snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ = rename_backbone_keys(snake_case_ ) # query, key and value matrices need special treatment read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val # finally, create HuggingFace model and load state dict UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ ) model.load_state_dict(snake_case_ ) model.eval() model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion UpperCAmelCase_ = conditional_detr(snake_case_ ) UpperCAmelCase_ = model(snake_case_ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
1
0
'''simple docstring''' from __future__ import annotations def _UpperCamelCase ( __A , __A , __A ) -> dict[str, float]: '''simple docstring''' if (voltage, current, resistance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if resistance < 0: raise ValueError("Resistance cannot be negative" ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
80
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): def __init__(self : int , *__a : Dict , **__a : str ): warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
1
0
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ): """simple docstring""" __lowerCAmelCase = IFInpaintingPipeline __lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} __lowerCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS __lowerCAmelCase = PipelineTesterMixin.required_optional_params - {"latents"} def SCREAMING_SNAKE_CASE ( self ) -> str: return self._get_dummy_components() def SCREAMING_SNAKE_CASE ( self , __A , __A=0 ) -> List[str]: if str(__A ).startswith('''mps''' ): a =torch.manual_seed(__A ) else: a =torch.Generator(device=__A ).manual_seed(__A ) a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) a =floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A ) a ={ '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def SCREAMING_SNAKE_CASE ( self ) -> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def SCREAMING_SNAKE_CASE ( self ) -> str: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def SCREAMING_SNAKE_CASE ( self ) -> Any: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: self._test_save_load_local() def SCREAMING_SNAKE_CASE ( self ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
81
'''simple docstring''' from __future__ import annotations import queue class __A : def __init__(self : Optional[Any] , __a : str ): UpperCAmelCase_ = data UpperCAmelCase_ = None UpperCAmelCase_ = None def lowerCAmelCase_ ( ) -> TreeNode: '''simple docstring''' print("\n********Press N to stop entering at any point of time********\n" ) UpperCAmelCase_ = input("Enter the value of the root node: " ).strip().lower() UpperCAmelCase_ = queue.Queue() UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() UpperCAmelCase_ = f"""Enter the left node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = left_node q.put(snake_case_ ) UpperCAmelCase_ = f"""Enter the right node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = right_node q.put(snake_case_ ) raise def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = [] while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(snake_case_ ) UpperCAmelCase_ = n.left # end of while means current node doesn't have left child UpperCAmelCase_ = stack.pop() # start to traverse its right child UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: stack.append(snake_case_ ) UpperCAmelCase_ = n.left UpperCAmelCase_ = stack.pop() print(n.data , end="," ) UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ , UpperCAmelCase_ = [], [] UpperCAmelCase_ = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 UpperCAmelCase_ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def lowerCAmelCase_ ( snake_case_ : str = "" , snake_case_ : Any=50 , snake_case_ : Union[str, Any]="*" ) -> str: '''simple docstring''' if not s: return "\n" + width * char UpperCAmelCase_ , UpperCAmelCase_ = divmod(width - len(snake_case_ ) - 2 , 2 ) return f"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) SCREAMING_SNAKE_CASE_: TreeNode =build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
1
0
def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = """""" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key _lowerCAmelCase = remove_duplicates(key.upper() ) _lowerCAmelCase = len(snake_case ) # First fill cipher with key characters _lowerCAmelCase = {alphabet[i]: char for i, char in enumerate(snake_case )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(snake_case ) , 26 ): _lowerCAmelCase = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 _lowerCAmelCase = alphabet[i - offset] _lowerCAmelCase = char return cipher_alphabet def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" return "".join(cipher_map.get(snake_case , snake_case ) for ch in message.upper() ) def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(snake_case , snake_case ) for ch in message.upper() ) def _UpperCAmelCase ( ): """simple docstring""" _lowerCAmelCase = input("""Enter message to encode or decode: """ ).strip() _lowerCAmelCase = input("""Enter keyword: """ ).strip() _lowerCAmelCase = input("""Encipher or decipher? E/D:""" ).strip()[0].lower() try: _lowerCAmelCase = {"""e""": encipher, """d""": decipher}[option] except KeyError: raise KeyError("""invalid input option""" ) _lowerCAmelCase = create_cipher_map(snake_case ) print(func(snake_case , snake_case ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
82
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) @add_end_docstrings( UpperCamelCase__ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class __A ( UpperCamelCase__ ): def _lowercase (self : str , __a : GenericTensor ): if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ) else: raise ValueError("Unsupported framework" ) return masked_index def _lowercase (self : Tuple , __a : GenericTensor ): UpperCAmelCase_ = self.get_masked_index(__a ) UpperCAmelCase_ = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def _lowercase (self : List[Any] , __a : GenericTensor ): if isinstance(__a , __a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__a ) def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ): if return_tensors is None: UpperCAmelCase_ = self.framework UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a ) self.ensure_exactly_one_mask_token(__a ) return model_inputs def _lowercase (self : str , __a : Optional[int] ): UpperCAmelCase_ = self.model(**__a ) UpperCAmelCase_ = model_inputs["input_ids"] return model_outputs def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: UpperCAmelCase_ = target_ids.shape[0] UpperCAmelCase_ = model_outputs["input_ids"][0] UpperCAmelCase_ = model_outputs["logits"] if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] UpperCAmelCase_ = outputs.numpy() UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = stable_softmax(__a , axis=-1 ) if target_ids is not None: UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) ) UpperCAmelCase_ = tf.expand_dims(__a , 0 ) UpperCAmelCase_ = tf.math.top_k(__a , k=__a ) UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy() else: UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = logits.softmax(dim=-1 ) if target_ids is not None: UpperCAmelCase_ = probs[..., target_ids] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a ) UpperCAmelCase_ = [] UpperCAmelCase_ = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): UpperCAmelCase_ = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place UpperCAmelCase_ = input_ids.numpy().copy() if target_ids is not None: UpperCAmelCase_ = target_ids[p].tolist() UpperCAmelCase_ = p # Filter padding out: UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a ) UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(__a ) result.append(__a ) if single_mask: return result[0] return result def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ): if isinstance(__a , __a ): UpperCAmelCase_ = [targets] try: UpperCAmelCase_ = self.tokenizer.get_vocab() except Exception: UpperCAmelCase_ = {} UpperCAmelCase_ = [] for target in targets: UpperCAmelCase_ = vocab.get(__a , __a ) if id_ is None: UpperCAmelCase_ = self.tokenizer( __a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"] if len(__a ) == 0: logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ "We cannot replace it with anything meaningful, ignoring it" ) continue UpperCAmelCase_ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) UpperCAmelCase_ = list(set(__a ) ) if len(__a ) == 0: raise ValueError("At least one target must be provided when passed." ) UpperCAmelCase_ = np.array(__a ) return target_ids def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ): UpperCAmelCase_ = {} if targets is not None: UpperCAmelCase_ = self.get_target_ids(__a , __a ) UpperCAmelCase_ = target_ids if top_k is not None: UpperCAmelCase_ = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ): UpperCAmelCase_ = super().__call__(__a , **__a ) if isinstance(__a , __a ) and len(__a ) == 1: return outputs[0] return outputs
1
0
'''simple docstring''' from collections.abc import Generator def A__ ( ): _UpperCamelCase , _UpperCamelCase : Tuple = 0, 1 while True: _UpperCamelCase , _UpperCamelCase : Union[str, Any] = b, a + b yield b def A__ ( UpperCAmelCase_ = 1_0_0_0 ): _UpperCamelCase : List[Any] = 1 _UpperCamelCase : str = fibonacci_generator() while len(str(next(UpperCAmelCase_ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
83
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__) @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : str a__ : str a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : List[int] a__ : Optional[List[int]] = None a__ : Optional[List[int]] = None a__ : Optional[Union[int, float]] = None a__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __A ( UpperCamelCase__ ): a__ : List[InputFeatures] def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = os.path.join( __a , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , ) UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCAmelCase_ = cached_features_file + ".lock" with FileLock(__a ): if os.path.exists(__a ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCAmelCase_ = torch.load(__a ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCAmelCase_ = ( processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) ) logger.info("Training examples: %s" , len(__a ) ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) logger.info("Saving features into cached file %s" , __a ) torch.save(self.features , __a ) def __len__(self : List[Any] ): return len(self.features ) def __getitem__(self : Any , __a : Optional[Any] ): return self.features[i] def _lowercase (self : Union[str, Any] ): return self.label_list if is_tf_available(): import tensorflow as tf class __A : a__ : List[InputFeatures] def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCAmelCase_ = tf.data.Dataset.from_generator( __a , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowercase (self : int ): return self.dataset def __len__(self : Any ): return len(self.features ) def __getitem__(self : int , __a : Union[str, Any] ): return self.features[i] def _lowercase (self : int ): return self.label_list class __A ( UpperCamelCase__ ): def _lowercase (self : List[Any] , __a : Dict ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" ) def _lowercase (self : Any , __a : List[Any] ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" ) def _lowercase (self : Any ): return ["contradiction", "entailment", "neutral"] def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ): UpperCAmelCase_ = [] for i, line in enumerate(__a ): if i == 0: continue UpperCAmelCase_ = "%s-%s" % (set_type, line[0]) UpperCAmelCase_ = line[5] UpperCAmelCase_ = line[6] UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7] UpperCAmelCase_ = line[0] examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) ) return examples def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )} UpperCAmelCase_ = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d" % (ex_index) ) UpperCAmelCase_ = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0 UpperCAmelCase_ = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE_: int ={ 'hans': 3, } SCREAMING_SNAKE_CASE_: Any ={ 'hans': HansProcessor, }
1
0
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): UpperCAmelCase_ :Any = DebertaVaTokenizer UpperCAmelCase_ :int = DebertaVaTokenizerFast UpperCAmelCase_ :Optional[Any] = True UpperCAmelCase_ :List[Any] = True def __lowerCAmelCase ( self ) -> List[str]: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ :List[Any] = DebertaVaTokenizer(__A , unk_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , __A ) -> int: lowerCAmelCase_ :List[str] = """this is a test""" lowerCAmelCase_ :Union[str, Any] = """this is a test""" return input_text, output_text def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Dict = """<pad>""" lowerCAmelCase_ :Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """[PAD]""" ) self.assertEqual(len(__A ) , 3_0001 ) def __lowerCAmelCase ( self ) -> str: self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 ) def __lowerCAmelCase ( self ) -> str: # fmt: off lowerCAmelCase_ :Union[str, Any] = """ \tHeLLo!how \n Are yoU? """ lowerCAmelCase_ :int = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""] # fmt: on lowerCAmelCase_ :List[str] = DebertaVaTokenizer(__A , do_lower_case=__A ) lowerCAmelCase_ :Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Union[str, Any] = DebertaVaTokenizerFast(__A , do_lower_case=__A ) lowerCAmelCase_ :List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def __lowerCAmelCase ( self ) -> Any: pass @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def __lowerCAmelCase ( self ) -> int: pass def __lowerCAmelCase ( self ) -> Dict: # fmt: off lowerCAmelCase_ :List[str] = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase_ :str = DebertaVaTokenizer(__A , split_by_punct=__A ) lowerCAmelCase_ :str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Any = DebertaVaTokenizerFast(__A , split_by_punct=__A ) lowerCAmelCase_ :int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> Any: # fmt: off lowerCAmelCase_ :Any = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :Tuple = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase_ :int = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Union[str, Any] = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: # fmt: off lowerCAmelCase_ :int = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCAmelCase_ :Tuple = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> Any: # fmt: off lowerCAmelCase_ :Any = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCAmelCase_ :List[Any] = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Optional[int] = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> str: # fmt: off lowerCAmelCase_ :Optional[int] = """ \tHeLLo!how \n Are yoU? """ lowerCAmelCase_ :List[Any] = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""] # fmt: on lowerCAmelCase_ :Union[str, Any] = DebertaVaTokenizer(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = DebertaVaTokenizerFast(__A , do_lower_case=__A , split_by_punct=__A ) lowerCAmelCase_ :Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Tuple = self.get_tokenizer() lowerCAmelCase_ :str = self.get_rust_tokenizer() lowerCAmelCase_ :List[str] = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__A , add_special_tokens=__A ) ) lowerCAmelCase_ :Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__A , add_special_tokens=__A ) ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = tokenizer.encode(__A , add_special_tokens=__A ) lowerCAmelCase_ :int = rust_tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[str] = self.get_rust_tokenizer() lowerCAmelCase_ :Dict = tokenizer.encode(__A ) lowerCAmelCase_ :Dict = rust_tokenizer.encode(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :str = """This is a test""" lowerCAmelCase_ :int = [13, 1, 4398, 25, 21, 1289] lowerCAmelCase_ :Optional[Any] = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""] lowerCAmelCase_ :str = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""] lowerCAmelCase_ :Any = DebertaVaTokenizer(__A , keep_accents=__A ) lowerCAmelCase_ :Optional[int] = DebertaVaTokenizerFast(__A , keep_accents=__A ) lowerCAmelCase_ :Union[str, Any] = tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[Any] = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Tuple = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :int = rust_tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Dict = rust_tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[str] = rust_tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) # fmt: off lowerCAmelCase_ :Tuple = """I was born in 92000, and this is falsé.""" lowerCAmelCase_ :Optional[Any] = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowerCAmelCase_ :List[Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ] lowerCAmelCase_ :Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCAmelCase_ :int = tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Any = tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :List[Any] = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = rust_tokenizer.encode(__A , add_special_tokens=__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :Optional[Any] = rust_tokenizer.tokenize(__A ) self.assertListEqual(__A , __A ) lowerCAmelCase_ :str = rust_tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual(__A , __A ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Optional[int] = DebertaVaTokenizer(__A ) lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" ) lowerCAmelCase_ :Dict = tokenizer.encode("""multi-sequence build""" ) lowerCAmelCase_ :int = tokenizer.build_inputs_with_special_tokens(__A ) lowerCAmelCase_ :Optional[Any] = tokenizer.build_inputs_with_special_tokens(__A , __A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __A , ) @slow def __lowerCAmelCase ( self ) -> Tuple: # fmt: off lowerCAmelCase_ :List[Any] = {"""input_ids""": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__A , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
84
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Tuple ={} class __A ( UpperCamelCase__ ): a__ : int = """llama""" a__ : Any = ["""past_key_values"""] def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ): UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = hidden_size UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads # for backward compatibility if num_key_value_heads is None: UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = num_key_value_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = initializer_range UpperCAmelCase_ = rms_norm_eps UpperCAmelCase_ = pretraining_tp UpperCAmelCase_ = use_cache UpperCAmelCase_ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def _lowercase (self : List[str] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f"""got {self.rope_scaling}""" ) UpperCAmelCase_ = self.rope_scaling.get("type" , __a ) UpperCAmelCase_ = self.rope_scaling.get("factor" , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
1
0
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) _SCREAMING_SNAKE_CASE : Dict = "\\n Text data.\n Second line of data." _SCREAMING_SNAKE_CASE : Union[str, Any] = "file" @pytest.fixture(scope="session" ) def UpperCamelCase_( snake_case : Any ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") snake_case_ = bytes(snake_case , "utf-8" ) with zstd.open(snake_case , "wb" ) as f: f.write(snake_case ) return path @pytest.fixture def UpperCamelCase_( snake_case : Dict ): '''simple docstring''' with open(os.path.join(tmpfs.local_root_dir , snake_case ) , "w" ) as f: f.write(snake_case ) return FILE_PATH @pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] ) def UpperCamelCase_( snake_case : Tuple , snake_case : Optional[int] , snake_case : int , snake_case : Optional[int] , snake_case : int , snake_case : Tuple ): '''simple docstring''' snake_case_ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} snake_case_ = input_paths[compression_format] snake_case_ = tmp_path / "cache" snake_case_ = DownloadConfig(cache_dir=snake_case , extract_compressed_file=snake_case ) snake_case_ = cached_path(snake_case , download_config=snake_case ) with open(snake_case ) as f: snake_case_ = f.read() with open(snake_case ) as f: snake_case_ = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" , [True, False] ) @pytest.mark.parametrize("default_cache_dir" , [True, False] ) def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Dict , snake_case : Tuple , snake_case : List[Any] , snake_case : Tuple ): '''simple docstring''' snake_case_ = "custom_cache" snake_case_ = "custom_extracted_dir" snake_case_ = tmp_path / "custom_extracted_path" if default_extracted: snake_case_ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , snake_case ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(snake_case ) ) snake_case_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) snake_case_ = xz_file snake_case_ = ( DownloadConfig(extract_compressed_file=snake_case ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=snake_case ) ) snake_case_ = cached_path(snake_case , download_config=snake_case ) assert Path(snake_case ).parent.parts[-2:] == expected def UpperCamelCase_( snake_case : Optional[Any] ): '''simple docstring''' snake_case_ = str(Path(snake_case ).resolve() ) assert cached_path(snake_case ) == text_file # relative path snake_case_ = str(Path(snake_case ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(snake_case ) == text_file def UpperCamelCase_( snake_case : Tuple ): '''simple docstring''' snake_case_ = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(snake_case ): cached_path(snake_case ) # relative path snake_case_ = "./__missing_file__.txt" with pytest.raises(snake_case ): cached_path(snake_case ) def UpperCamelCase_( snake_case : Union[str, Any] ): '''simple docstring''' snake_case_ = get_from_cache(f'tmp://{tmpfs_file}' ) with open(snake_case ) as f: snake_case_ = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" , snake_case ) def UpperCamelCase_( ): '''simple docstring''' with pytest.raises(snake_case ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , snake_case ) def UpperCamelCase_( snake_case : Union[str, Any] ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(snake_case ): http_get("https://huggingface.co" , temp_file=snake_case ) with pytest.raises(snake_case ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , snake_case ) def UpperCamelCase_( snake_case : Any ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(snake_case ): ftp_get("ftp://huggingface.co" , temp_file=snake_case ) with pytest.raises(snake_case ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" , snake_case ) def UpperCamelCase_( snake_case : List[str] ): '''simple docstring''' snake_case_ = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(snake_case ): fsspec_get("s3://huggingface.co" , temp_file=snake_case ) with pytest.raises(snake_case ): fsspec_head("s3://huggingface.co" )
85
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def _lowercase (self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase (self : str ): UpperCAmelCase_ = 1 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def _lowercase (self : int ): torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _lowercase (self : Any ): torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _lowercase (self : Optional[Any] ): torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__a ) def _lowercase (self : Any ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] UpperCAmelCase_ = image[0, -3:, -3:, -1] UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase (self : Optional[int] ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _lowercase (self : str ): UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 UpperCAmelCase_ = unet.half() UpperCAmelCase_ = text_encoder.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : List[Any] ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def _lowercase (self : Tuple ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _lowercase (self : List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) UpperCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
1
0
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if principal <= 0: raise Exception('Principal borrowed must be > 0' ) if rate_per_annum < 0: raise Exception('Rate of interest must be >= 0' ) if years_to_repay <= 0 or not isinstance(_UpperCamelCase , _UpperCamelCase ): raise Exception('Years to repay must be an integer > 0' ) # Yearly rate is divided by 12 to get monthly rate __lowerCAmelCase : Optional[Any] = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly __lowerCAmelCase : str = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
86
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __A ( UpperCamelCase__ ): def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ): UpperCAmelCase_ = 1.0 if scale is None else scale UpperCAmelCase_ = 0.0 if loc is None else loc super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] ) @property def _lowercase (self : Union[str, Any] ): return self.base_dist.mean * self.scale + self.loc @property def _lowercase (self : List[Any] ): return self.base_dist.variance * self.scale**2 @property def _lowercase (self : List[Any] ): return self.variance.sqrt() class __A ( nn.Module ): def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ): super().__init__(**__a ) UpperCAmelCase_ = args_dim UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] ) UpperCAmelCase_ = domain_map def _lowercase (self : List[str] , __a : torch.Tensor ): UpperCAmelCase_ = [proj(__a ) for proj in self.proj] return self.domain_map(*__a ) class __A ( nn.Module ): def __init__(self : Union[str, Any] , __a : List[str] ): super().__init__() UpperCAmelCase_ = function def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ): return self.function(__a , *__a ) class __A : a__ : type a__ : int a__ : Dict[str, int] def __init__(self : List[Any] , __a : int = 1 ): UpperCAmelCase_ = dim UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim} def _lowercase (self : Any , __a : Any ): if self.dim == 1: return self.distribution_class(*__a ) else: return Independent(self.distribution_class(*__a ) , 1 ) def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ): UpperCAmelCase_ = self._base_distribution(__a ) if loc is None and scale is None: return distr else: return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim ) @property def _lowercase (self : Any ): return () if self.dim == 1 else (self.dim,) @property def _lowercase (self : Dict ): return len(self.event_shape ) @property def _lowercase (self : Tuple ): return 0.0 def _lowercase (self : List[str] , __a : int ): return ParameterProjection( in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _lowercase (self : Optional[int] , *__a : torch.Tensor ): raise NotImplementedError() @staticmethod def _lowercase (__a : torch.Tensor ): return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0 class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} a__ : type = StudentT @classmethod def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCAmelCase_ = 2.0 + cls.squareplus(__a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"loc": 1, "scale": 1} a__ : type = Normal @classmethod def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"total_count": 1, "logits": 1} a__ : type = NegativeBinomial @classmethod def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _lowercase (self : List[str] , __a : str ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if self.dim == 1: return self.distribution_class(total_count=__a , logits=__a ) else: return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 ) def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
1
0
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class snake_case_ ( __A ): __A : List[Any] = "blenderbot-small" __A : Tuple = ["past_key_values"] __A : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Any , lowercase_ : Any=5_02_65 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Optional[int]=8 , lowercase_ : Tuple=20_48 , lowercase_ : Any=16 , lowercase_ : Optional[int]=8 , lowercase_ : Any=20_48 , lowercase_ : Any=16 , lowercase_ : Tuple=0.0 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int="gelu" , lowercase_ : str=5_12 , lowercase_ : str=0.1 , lowercase_ : Optional[int]=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : str=1 , lowercase_ : int=False , lowercase_ : Optional[int]=0 , lowercase_ : Tuple=1 , lowercase_ : int=2 , lowercase_ : List[str]=2 , **lowercase_ : Tuple , ) -> Union[str, Any]: lowercase__ : Any = vocab_size lowercase__ : int = max_position_embeddings lowercase__ : Optional[Any] = d_model lowercase__ : List[str] = encoder_ffn_dim lowercase__ : List[str] = encoder_layers lowercase__ : List[Any] = encoder_attention_heads lowercase__ : List[str] = decoder_ffn_dim lowercase__ : Optional[Any] = decoder_layers lowercase__ : Union[str, Any] = decoder_attention_heads lowercase__ : int = dropout lowercase__ : Optional[int] = attention_dropout lowercase__ : Dict = activation_dropout lowercase__ : Union[str, Any] = activation_function lowercase__ : Dict = init_std lowercase__ : int = encoder_layerdrop lowercase__ : List[str] = decoder_layerdrop lowercase__ : str = use_cache lowercase__ : Dict = encoder_layers lowercase__ : int = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class snake_case_ ( __A ): @property def __UpperCamelCase ( self : Any ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : str = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowercase__ : Tuple = {0: "batch"} lowercase__ : Any = {0: "batch", 1: "past_decoder_sequence + sequence"} else: lowercase__ : Dict = {0: "batch", 1: "decoder_sequence"} lowercase__ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. lowercase__ : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: lowercase__ , lowercase__ : Any = self.num_layers for i in range(lowercase_ ): lowercase__ : List[str] = {0: "batch", 2: "past_sequence + sequence"} lowercase__ : Any = {0: "batch", 2: "past_sequence + sequence"} else: lowercase__ : int = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase ( self : str ) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Dict = super().outputs else: lowercase__ : List[str] = super(lowercase_ , self ).outputs if self.use_past: lowercase__ , lowercase__ : Optional[Any] = self.num_layers for i in range(lowercase_ ): lowercase__ : Dict = {0: "batch", 2: "past_sequence + sequence"} lowercase__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase ( self : Tuple , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: lowercase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Generate decoder inputs lowercase__ : str = seq_length if not self.use_past else 1 lowercase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} lowercase__ : Union[str, Any] = dict(**lowercase_ , **lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowercase__ , lowercase__ : Union[str, Any] = common_inputs["input_ids"].shape lowercase__ : Optional[int] = common_inputs["decoder_input_ids"].shape[1] lowercase__ , lowercase__ : List[str] = self.num_attention_heads lowercase__ : Dict = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ : List[str] = decoder_seq_length + 3 lowercase__ : Union[str, Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowercase__ : Tuple = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(lowercase_ , lowercase_ )] , dim=1 ) lowercase__ : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowercase__ , lowercase__ : List[str] = self.num_layers lowercase__ : List[Any] = min(lowercase_ , lowercase_ ) lowercase__ : List[Any] = max(lowercase_ , lowercase_ ) - min_num_layers lowercase__ : int = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(lowercase_ ): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), torch.zeros(lowercase_ ), ) ) # TODO: test this. lowercase__ : str = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(lowercase_ , lowercase_ ): common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) ) return common_inputs def __UpperCamelCase ( self : Optional[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: lowercase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowercase__ , lowercase__ : str = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowercase__ : Dict = seqlen + 2 lowercase__ , lowercase__ : List[str] = self.num_layers lowercase__ , lowercase__ : Optional[Any] = self.num_attention_heads lowercase__ : Optional[int] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowercase__ : Optional[int] = common_inputs["attention_mask"].dtype lowercase__ : List[Any] = torch.cat( [common_inputs["attention_mask"], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 ) lowercase__ : Dict = [ (torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ ) ] return common_inputs def __UpperCamelCase ( self : List[Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase__ : List[Any] = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(lowercase_ ) lowercase__ : List[Any] = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ ) # Generate dummy inputs according to compute batch and sequence lowercase__ : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size lowercase__ : Union[str, Any] = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) ) return common_inputs def __UpperCamelCase ( self : str , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: lowercase__ : str = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) elif self.task == "causal-lm": lowercase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) else: lowercase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ ) return common_inputs def __UpperCamelCase ( self : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ) -> Any: if self.task in ["default", "seq2seq-lm"]: lowercase__ : Dict = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) else: lowercase__ : str = super(lowercase_ , self )._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_ )
87
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def _lowercase (self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ): UpperCAmelCase_ = 0.0 for i, j in zip(__a , __a ): n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0 UpperCAmelCase_ = n_correct / len(__a ) return { "accuracy": accuracy, }
1
0
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : int=True , UpperCamelCase__ : int=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : str=512 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : str=None , ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def _lowercase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : List[str] ) -> str: """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , ) def _lowercase ( self : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> str: """simple docstring""" __magic_name__ = OpenLlamaModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , ) -> Optional[Any]: """simple docstring""" __magic_name__ = True __magic_name__ = OpenLlamaModel(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , ) __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , ) __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , ) -> Dict: """simple docstring""" __magic_name__ = OpenLlamaForCausalLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , ) -> Optional[Any]: """simple docstring""" __magic_name__ = True __magic_name__ = True __magic_name__ = OpenLlamaForCausalLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() # first forward pass __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , ) __magic_name__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) __magic_name__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __magic_name__ = torch.cat([input_ids, next_tokens] , dim=-1 ) __magic_name__ = torch.cat([input_mask, next_mask] , dim=-1 ) __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0] __magic_name__ = model( UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0] # select random slice __magic_name__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() __magic_name__ = output_from_no_past[:, -3:, random_slice_idx].detach() __magic_name__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) ) def _lowercase ( self : Optional[Any] ) -> int: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _A , _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) a__ = (OpenLlamaForCausalLM,) if is_torch_available() else () a__ = ( { """feature-extraction""": OpenLlamaModel, """text-classification""": OpenLlamaForSequenceClassification, """text-generation""": OpenLlamaForCausalLM, """zero-shot""": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) a__ = False a__ = False def _lowercase ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" __magic_name__ = OpenLlamaModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def _lowercase ( self : List[Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : str ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = 3 __magic_name__ = input_dict["""input_ids"""] __magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ ) __magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __magic_name__ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase ( self : List[str] ) -> Tuple: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = 3 __magic_name__ = """single_label_classification""" __magic_name__ = input_dict["""input_ids"""] __magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ ) __magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __magic_name__ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _lowercase ( self : str ) -> Dict: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = 3 __magic_name__ = """multi_label_classification""" __magic_name__ = input_dict["""input_ids"""] __magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ ) __magic_name__ = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __magic_name__ = OpenLlamaForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" ) def _lowercase ( self : str ) -> int: """simple docstring""" pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase ( self : Tuple , UpperCamelCase__ : List[Any] ) -> str: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = ids_tensor([1, 10] , config.vocab_size ) __magic_name__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __magic_name__ = OpenLlamaModel(UpperCamelCase__ ) original_model.to(UpperCamelCase__ ) original_model.eval() __magic_name__ = original_model(UpperCamelCase__ ).last_hidden_state __magic_name__ = original_model(UpperCamelCase__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __magic_name__ = {"""type""": scaling_type, """factor""": 10.0} __magic_name__ = OpenLlamaModel(UpperCamelCase__ ) scaled_model.to(UpperCamelCase__ ) scaled_model.eval() __magic_name__ = scaled_model(UpperCamelCase__ ).last_hidden_state __magic_name__ = scaled_model(UpperCamelCase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-5 ) )
88
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]: '''simple docstring''' model.train() UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict: '''simple docstring''' set_seed(42 ) UpperCAmelCase_ = RegressionModel() UpperCAmelCase_ = deepcopy(snake_case_ ) UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( snake_case_ : Any ) -> int: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] GradientState._reset_state() def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ )) if accelerator.num_processes > 1: check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ = RegressionDataset(length=96 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if iteration < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if batch_num < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(snake_case_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(snake_case_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(snake_case_ , snake_case_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Dict ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
1
0
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ = 3 , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 1000000 ) -> int: _a : Tuple = 0 _a : List[Any] = 1 for current_denominator in range(1 , limit + 1 ): _a : int = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: _a : Any = current_numerator _a : Dict = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_000_000))
89
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(snake_case_ , x % y ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int: '''simple docstring''' UpperCAmelCase_ = 1 for i in range(1 , n + 1 ): UpperCAmelCase_ = lcm(snake_case_ , snake_case_ ) return g if __name__ == "__main__": print(f"{solution() = }")
1
0
class __lowerCAmelCase : """simple docstring""" def __init__( self , lowerCamelCase__ ) -> Any: '''simple docstring''' __lowerCamelCase = n __lowerCamelCase = [None] * self.n __lowerCamelCase = 0 # index of the first element __lowerCamelCase = 0 __lowerCamelCase = 0 def __len__( self ) -> int: '''simple docstring''' return self.size def lowercase_ ( self ) -> bool: '''simple docstring''' return self.size == 0 def lowercase_ ( self ) -> str: '''simple docstring''' return False if self.is_empty() else self.array[self.front] def lowercase_ ( self , lowerCamelCase__ ) -> Optional[Any]: '''simple docstring''' if self.size >= self.n: raise Exception('QUEUE IS FULL' ) __lowerCamelCase = data __lowerCamelCase = (self.rear + 1) % self.n self.size += 1 return self def lowercase_ ( self ) -> Tuple: '''simple docstring''' if self.size == 0: raise Exception('UNDERFLOW' ) __lowerCamelCase = self.array[self.front] __lowerCamelCase = None __lowerCamelCase = (self.front + 1) % self.n self.size -= 1 return temp
90
'''simple docstring''' import os from math import logaa def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int: '''simple docstring''' UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ): UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) ) if x * logaa(snake_case_ ) > largest: UpperCAmelCase_ = x * logaa(snake_case_ ) UpperCAmelCase_ = i + 1 return result if __name__ == "__main__": print(solution())
1
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class lowerCAmelCase__ ( UpperCAmelCase__ ): '''simple docstring''' __UpperCamelCase = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
91
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = checkpoint UpperCAmelCase_ = {} UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ ) } for i in range(snake_case_ ): UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""" ) UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""" ) UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) for i in range(snake_case_ ): UpperCAmelCase_ = num_up_blocks - 1 - i UpperCAmelCase_ = [ key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) return new_checkpoint def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict: '''simple docstring''' UpperCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ = io.BytesIO(r.content ) UpperCAmelCase_ = OmegaConf.load(snake_case_ ) UpperCAmelCase_ = 5_12 UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ = {} with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ = f.get_tensor(snake_case_ ) else: UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"] # Convert the VAE model. UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ ) UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ ) UpperCAmelCase_ = AutoencoderKL(**snake_case_ ) vae.load_state_dict(snake_case_ ) vae.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') SCREAMING_SNAKE_CASE_: str =parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
1
0
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function UpperCamelCase__ = 1.054571817E-34 # unit of ℏ : J * s UpperCamelCase__ = 3E8 # unit of c : m * s^-1 def _a ( SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float ): if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: __lowerCAmelCase = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_40 * (distance) ** 4 ) return {"force": force} elif area == 0: __lowerCAmelCase = (2_40 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: __lowerCAmelCase = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_40 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
92
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __A ( unittest.TestCase ): def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 1 def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase (self : Dict , __a : Any , __a : List[Any] ): UpperCAmelCase_ = FlaxViTModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (self.image_size, self.image_size) UpperCAmelCase_ = (self.patch_size, self.patch_size) UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase (self : Tuple , __a : str , __a : Any ): UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = FlaxViTForImageClassification(config=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = FlaxViTForImageClassification(__a ) UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(__a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase (self : Any ): UpperCAmelCase_ = FlaxViTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(__a , __a ) UpperCAmelCase_ = model_class(__a ) @jax.jit def model_jitted(__a : Tuple , **__a : List[Any] ): return model(pixel_values=__a , **__a ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase (self : Tuple ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__a )
1
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _lowercase : Any = logging.get_logger(__name__) class lowerCAmelCase__ ( lowerCamelCase_ ): lowerCAmelCase_ = ['''pixel_values'''] def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 2_55 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) lowercase_ : str = size if size is not None else {'''height''': 3_84, '''width''': 3_84} lowercase_ : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = do_resize lowercase_ : Dict = size lowercase_ : Optional[Any] = resample lowercase_ : Tuple = do_rescale lowercase_ : str = rescale_factor lowercase_ : Union[str, Any] = do_normalize lowercase_ : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase_ : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowercase_ : Dict = do_convert_rgb def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" lowercase_ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) lowercase_ : Any = (size['''height'''], size['''width''']) return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ): """simple docstring""" lowercase_ : List[Any] = do_resize if do_resize is not None else self.do_resize lowercase_ : Dict = resample if resample is not None else self.resample lowercase_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale lowercase_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ : str = do_normalize if do_normalize is not None else self.do_normalize lowercase_ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean lowercase_ : List[Any] = image_std if image_std is not None else self.image_std lowercase_ : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase_ : List[Any] = size if size is not None else self.size lowercase_ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE , default_to_square=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase_ : int = [convert_to_rgb(__SCREAMING_SNAKE_CASE ) for image in images] # All transformations expect numpy arrays. lowercase_ : Union[str, Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images] if do_resize: lowercase_ : List[Any] = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: lowercase_ : Any = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: lowercase_ : str = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images] lowercase_ : List[str] = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images] lowercase_ : Dict = BatchFeature(data={'''pixel_values''': images} , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_outputs
93
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 5 # Realm tok UpperCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(__a , exist_ok=__a ) def _lowercase (self : Optional[Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def _lowercase (self : Any ): shutil.rmtree(self.tmpdirname ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase (self : List[str] ): UpperCAmelCase_ = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase (self : Any ): UpperCAmelCase_ = np.array( [ B"This is the first record", B"This is the second record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=__a , ) return block_records def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _lowercase (self : int ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual([False, True, True] , __a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: UpperCAmelCase_ = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
1
0
from math import factorial def __lowerCamelCase ( UpperCAmelCase_ : int = 100 ): """simple docstring""" return sum(map(UpperCAmelCase_ , str(factorial(UpperCAmelCase_ ) ) ) ) if __name__ == "__main__": print(solution(int(input('''Enter the Number: ''').strip())))
94
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K) def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float: '''simple docstring''' if donor_conc <= 0: raise ValueError("Donor concentration should be positive" ) elif acceptor_conc <= 0: raise ValueError("Acceptor concentration should be positive" ) elif intrinsic_conc <= 0: raise ValueError("Intrinsic concentration should be positive" ) elif donor_conc <= intrinsic_conc: raise ValueError( "Donor concentration should be greater than intrinsic concentration" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( "Acceptor concentration should be greater than intrinsic concentration" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
UpperCAmelCase : Tuple = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} UpperCAmelCase : Optional[int] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def _A ( SCREAMING_SNAKE_CASE : dict[int, list[int]] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[bool] ): """simple docstring""" a__ : Union[str, Any] =True a__ : Any =[] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) order.append(SCREAMING_SNAKE_CASE ) return order def _A ( SCREAMING_SNAKE_CASE : dict[int, list[int]] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[bool] ): """simple docstring""" a__ : List[str] =True a__ : Tuple =[vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return component def _A ( SCREAMING_SNAKE_CASE : dict[int, list[int]] ): """simple docstring""" a__ : str =len(SCREAMING_SNAKE_CASE ) * [False] a__ : dict[int, list[int]] ={vert: [] for vert in range(len(SCREAMING_SNAKE_CASE ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(SCREAMING_SNAKE_CASE ) a__ : Optional[Any] =[] for i, was_visited in enumerate(SCREAMING_SNAKE_CASE ): if not was_visited: order += topology_sort(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) a__ : List[str] =[] a__ : Optional[Any] =len(SCREAMING_SNAKE_CASE ) * [False] for i in range(len(SCREAMING_SNAKE_CASE ) ): a__ : Any =order[len(SCREAMING_SNAKE_CASE ) - i - 1] if not visited[vert]: a__ : List[str] =find_components(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) components_list.append(SCREAMING_SNAKE_CASE ) return components_list
95
'''simple docstring''' import math def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase_ = input("Enter message: " ) UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) ) UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " ) if mode.lower().startswith("e" ): UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ ) elif mode.lower().startswith("d" ): UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"""Output:\n{text + "|"}""" ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = [""] * key for col in range(snake_case_ ): UpperCAmelCase_ = col while pointer < len(snake_case_ ): cipher_text[col] += message[pointer] pointer += key return "".join(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key ) UpperCAmelCase_ = key UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ ) UpperCAmelCase_ = [""] * num_cols UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase_ = 0 row += 1 return "".join(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
1
0
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss lowercase__ = pytest.mark.integration @require_faiss class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): _lowerCamelCase : int = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase ) for x in np.arange(30 ).tolist()]} ) return dset def A_ ( self ): import faiss _lowerCamelCase : Dataset = self._create_dummy_dataset() _lowerCamelCase : str = dset.map( lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase ) _lowerCamelCase : Optional[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) _lowerCamelCase, _lowerCamelCase : int = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def A_ ( self ): import faiss _lowerCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) _lowerCamelCase, _lowerCamelCase : Optional[int] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self ): import faiss _lowerCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) _lowerCamelCase, _lowerCamelCase : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self ): _lowerCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(lowercase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def A_ ( self ): from elasticsearch import Elasticsearch _lowerCamelCase : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: _lowerCamelCase : Tuple = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) _lowerCamelCase : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} _lowerCamelCase : Optional[int] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=lowercase ) _lowerCamelCase, _lowerCamelCase : Union[str, Any] = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): import faiss _lowerCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query _lowerCamelCase : Dict = np.zeros(5 , dtype=np.floataa ) _lowerCamelCase : Dict = 1 _lowerCamelCase, _lowerCamelCase : int = index.search(lowercase ) self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries _lowerCamelCase : Union[str, Any] = np.eye(5 , dtype=np.floataa )[::-1] _lowerCamelCase, _lowerCamelCase : str = index.search_batch(lowercase ) self.assertRaises(lowercase , index.search_batch , queries[0] ) _lowerCamelCase : List[str] = [scores[0] for scores in total_scores] _lowerCamelCase : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , lowercase ) def A_ ( self ): import faiss _lowerCamelCase : Tuple = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) _lowerCamelCase : int = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(lowercase ): _lowerCamelCase : List[Any] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def A_ ( self ): import faiss _lowerCamelCase : Dict = faiss.IndexFlat(5 ) _lowerCamelCase : Union[str, Any] = FaissIndex(custom_index=lowercase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def A_ ( self ): import faiss _lowerCamelCase : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file: index.save(tmp_file.name ) _lowerCamelCase : Union[str, Any] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) _lowerCamelCase : Tuple = np.zeros(5 , dtype=np.floataa ) _lowerCamelCase : Optional[int] = 1 _lowerCamelCase, _lowerCamelCase : Tuple = index.search(lowercase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def _snake_case ( lowercase__ ): import faiss _lowerCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) _lowerCamelCase : Dict = 'index.faiss' _lowerCamelCase : Optional[int] = f'''mock://{index_name}''' index.save(lowercase__ , storage_options=mockfs.storage_options ) _lowerCamelCase : Dict = FaissIndex.load(lowercase__ , storage_options=mockfs.storage_options ) _lowerCamelCase : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) _lowerCamelCase : Any = 1 _lowerCamelCase, _lowerCamelCase : List[Any] = index.search(lowercase__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def A_ ( self ): from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: _lowerCamelCase : Tuple = Elasticsearch() _lowerCamelCase : List[Any] = {'acknowledged': True} _lowerCamelCase : Optional[Any] = ElasticSearchIndex(es_client=lowercase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query _lowerCamelCase : Optional[Any] = 'foo' _lowerCamelCase : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} _lowerCamelCase, _lowerCamelCase : List[Any] = index.search(lowercase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout _lowerCamelCase : List[str] = 'foo' _lowerCamelCase : Union[str, Any] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} _lowerCamelCase, _lowerCamelCase : str = index.search(lowercase , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries _lowerCamelCase : Dict = ['foo', 'bar', 'foobar'] _lowerCamelCase : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} _lowerCamelCase, _lowerCamelCase : List[str] = index.search_batch(lowercase ) _lowerCamelCase : Union[str, Any] = [scores[0] for scores in total_scores] _lowerCamelCase : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([1, 1, 1] , lowercase ) # batched queries with timeout _lowerCamelCase : Optional[int] = ['foo', 'bar', 'foobar'] _lowerCamelCase : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} _lowerCamelCase, _lowerCamelCase : Union[str, Any] = index.search_batch(lowercase , request_timeout=30 ) _lowerCamelCase : Optional[int] = [scores[0] for scores in total_scores] _lowerCamelCase : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([1, 1, 1] , lowercase )
96
'''simple docstring''' import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger() SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] , __a : str ): os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = {"source": "What is love ?", "target": "life"} UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f: f.write(__a ) def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = os.path.join(__a , "output" ) UpperCAmelCase_ = os.path.join(__a , "data" ) self._create_dummy_data(data_dir=__a ) UpperCAmelCase_ = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__a , env=self.get_env() ) UpperCAmelCase_ = os.path.join(__a , "metrics.json" ) with open(__a ) as f: UpperCAmelCase_ = json.load(__a ) return result @require_torch_gpu def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def _lowercase (self : Dict ): UpperCAmelCase_ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def _lowercase (self : Any ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
1
0
'''simple docstring''' import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class lowercase : """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase__ :int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCamelCase__ :int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCamelCase__ :int = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCamelCase__ :int = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCamelCase__ :Optional[Any] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def lowerCAmelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase__ :int = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCamelCase__ :List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) UpperCamelCase__ :List[str] = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) UpperCamelCase__ :Optional[int] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) UpperCamelCase__ :Tuple = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0001 , beta_end=0.02 , ) torch.manual_seed(0 ) UpperCamelCase__ :str = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = self.get_dummy_components() UpperCamelCase__ :List[Any] = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCamelCase__ :int = self.get_dummy_inputs(UpperCamelCase_ ) UpperCamelCase__ :List[str] = inputs['''prompt'''] UpperCamelCase__ :Tuple = inputs['''generator'''] UpperCamelCase__ :Optional[Any] = inputs['''num_inference_steps'''] UpperCamelCase__ :List[str] = inputs['''output_type'''] if "image" in inputs: UpperCamelCase__ :Optional[int] = inputs['''image'''] else: UpperCamelCase__ :Optional[Any] = None if "mask_image" in inputs: UpperCamelCase__ :List[Any] = inputs['''mask_image'''] else: UpperCamelCase__ :Union[str, Any] = None if "original_image" in inputs: UpperCamelCase__ :Optional[Any] = inputs['''original_image'''] else: UpperCamelCase__ :Optional[Any] = None UpperCamelCase__ , UpperCamelCase__ :Tuple = pipe.encode_prompt(UpperCamelCase_ ) # inputs with prompt converted to embeddings UpperCamelCase__ :List[str] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCamelCase__ :List[str] = image if mask_image is not None: UpperCamelCase__ :List[Any] = mask_image if original_image is not None: UpperCamelCase__ :Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase__ :List[Any] = pipe(**UpperCamelCase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase_ ) UpperCamelCase__ :Dict = self.pipeline_class.from_pretrained(UpperCamelCase_ ) pipe_loaded.to(UpperCamelCase_ ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCamelCase_ , UpperCamelCase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCamelCase__ :List[Any] = self.get_dummy_inputs(UpperCamelCase_ ) UpperCamelCase__ :str = inputs['''generator'''] UpperCamelCase__ :Optional[int] = inputs['''num_inference_steps'''] UpperCamelCase__ :Any = inputs['''output_type'''] # inputs with prompt converted to embeddings UpperCamelCase__ :List[str] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: UpperCamelCase__ :Dict = image if mask_image is not None: UpperCamelCase__ :int = mask_image if original_image is not None: UpperCamelCase__ :Optional[int] = original_image UpperCamelCase__ :str = pipe_loaded(**UpperCamelCase_ )[0] UpperCamelCase__ :Optional[Any] = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max() self.assertLess(UpperCamelCase_ , 1e-4 ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Union[str, Any] = self.get_dummy_components() UpperCamelCase__ :List[str] = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ ) UpperCamelCase__ :Optional[int] = pipe(**UpperCamelCase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase_ ) UpperCamelCase__ :Tuple = self.pipeline_class.from_pretrained(UpperCamelCase_ ) pipe_loaded.to(UpperCamelCase_ ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests UpperCamelCase__ :Tuple = self.get_dummy_inputs(UpperCamelCase_ ) UpperCamelCase__ :int = pipe_loaded(**UpperCamelCase_ )[0] UpperCamelCase__ :Tuple = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max() self.assertLess(UpperCamelCase_ , 1e-4 )
97
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time SCREAMING_SNAKE_CASE_: Optional[int] =Lock() def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(snake_case_ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() UpperCAmelCase_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left UpperCAmelCase_ = min(snake_case_ , snake_case_ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(snake_case_ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() UpperCAmelCase_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right UpperCAmelCase_ = max(snake_case_ , snake_case_ ) # after all swaps are performed, send the values back to main result_pipe[1].send(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr for i in range(1 , len(snake_case_ ) - 1 ): UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr process_array_.append( Process( target=snake_case_ , args=( len(snake_case_ ) - 1, arr[len(snake_case_ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(snake_case_ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(snake_case_ ) ): UpperCAmelCase_ = result_pipe[p][0].recv() process_array_[p].join() return arr def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*snake_case_ ) UpperCAmelCase_ = odd_even_transposition(snake_case_ ) print("Sorted List\n" ) print(*snake_case_ ) if __name__ == "__main__": main()
1
0
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys lowerCAmelCase__ : Tuple = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8') lowerCAmelCase__ : int = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('utf-8').split() lowerCAmelCase__ : Any = '|'.join(sys.argv[1:]) lowerCAmelCase__ : List[Any] = re.compile(rF"""^({joined_dirs}).*?\.py$""") lowerCAmelCase__ : Optional[Any] = [x for x in modified_files if regex.match(x)] print(' '.join(relevant_modified_files), end='')
98
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b" UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) ) return "0b" + "".join( str(int("1" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
lowercase : List[str] = tuple[float, float, float] lowercase : int = tuple[float, float, float] def A_ ( A__ , A__ ) -> Vectorad: a__ : Optional[int] = end_pointa[0] - end_pointa[0] a__ : str = end_pointa[1] - end_pointa[1] a__ : Dict = end_pointa[2] - end_pointa[2] return (x, y, z) def A_ ( A__ , A__ ) -> Vectorad: a__ : Tuple = ab[1] * ac[2] - ab[2] * ac[1] # *i a__ : Dict = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j a__ : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def A_ ( A__ , A__ ) -> bool: return tuple(round(A__ , A__ ) for x in vector ) == (0, 0, 0) def A_ ( A__ , A__ , A__ , A__ = 10 ) -> bool: a__ : Tuple = create_vector(A__ , A__ ) a__ : str = create_vector(A__ , A__ ) return is_zero_vector(get_ad_vectors_cross(A__ , A__ ) , A__ )
99
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ) -> None: '''simple docstring''' if start is None: UpperCAmelCase_ = 0 if end is None: UpperCAmelCase_ = len(snake_case_ ) - 1 if start >= end: return UpperCAmelCase_ = (start + end) // 2 slowsort(snake_case_ , snake_case_ , snake_case_ ) slowsort(snake_case_ , mid + 1 , snake_case_ ) if sequence[end] < sequence[mid]: UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[end] slowsort(snake_case_ , snake_case_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
1
0
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def snake_case_ ( self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding="""utf-8""" , check=lowerCAmelCase__ , ) assert hasattr(self , """env""") def snake_case_ ( self , lowerCAmelCase__=1): # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=lowerCAmelCase__ , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase__ , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def snake_case_ ( self , lowerCAmelCase__): TrainingJobAnalytics(lowerCAmelCase__).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def snake_case_ ( self): # create estimator __SCREAMING_SNAKE_CASE = self.create_estimator() # run training estimator.fit() # result dataframe __SCREAMING_SNAKE_CASE = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) __SCREAMING_SNAKE_CASE = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping __SCREAMING_SNAKE_CASE = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCAmelCase__)
100
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __A ( UpperCamelCase__ ): a__ : Optional[Any] = DistilBertTokenizer a__ : Any = DistilBertTokenizerFast a__ : str = True @slow def _lowercase (self : int ): UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
1
0
from __future__ import annotations import math import random from typing import Any class lowercase : def __init__( self): lowercase = [] lowercase = 0 lowercase = 0 def A__ ( self): return self.head == self.tail def A__ ( self ,A__): self.data.append(A__) lowercase = self.tail + 1 def A__ ( self): lowercase = self.data[self.head] lowercase = self.head + 1 return ret def A__ ( self): return self.tail - self.head def A__ ( self): print(self.data) print('''**************''') print(self.data[self.head : self.tail]) class lowercase : def __init__( self ,A__): lowercase = data lowercase = None lowercase = None lowercase = 1 def A__ ( self): return self.data def A__ ( self): return self.left def A__ ( self): return self.right def A__ ( self): return self.height def A__ ( self ,A__): lowercase = data def A__ ( self ,A__): lowercase = node def A__ ( self ,A__): lowercase = node def A__ ( self ,A__): lowercase = height def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' if node is None: return 0 return node.get_height() def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' if a > b: return a return b def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' print('''left rotation node:''' , node.get_data() ) lowercase = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(lowerCAmelCase__ ) lowercase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCAmelCase__ ) lowercase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(lowerCAmelCase__ ) return ret def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' print('''right rotation node:''' , node.get_data() ) lowercase = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(lowerCAmelCase__ ) lowercase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCAmelCase__ ) lowercase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(lowerCAmelCase__ ) return ret def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = node.get_left() assert left_child is not None node.set_left(left_rotation(lowerCAmelCase__ ) ) return right_rotation(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' lowercase = node.get_right() assert right_child is not None node.set_right(right_rotation(lowerCAmelCase__ ) ) return left_rotation(lowerCAmelCase__ ) def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' if node is None: return MyNode(lowerCAmelCase__ ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , lowerCAmelCase__ ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected lowercase = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child lowercase = right_rotation(lowerCAmelCase__ ) else: lowercase = lr_rotation(lowerCAmelCase__ ) else: node.set_right(insert_node(node.get_right() , lowerCAmelCase__ ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: lowercase = node.get_right() assert right_child is not None if data < right_child.get_data(): lowercase = rl_rotation(lowerCAmelCase__ ) else: lowercase = left_rotation(lowerCAmelCase__ ) lowercase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(lowerCAmelCase__ ) return node def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' while True: lowercase = root.get_right() if right_child is None: break lowercase = right_child return root.get_data() def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' while True: lowercase = root.get_left() if left_child is None: break lowercase = left_child return root.get_data() def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): '''simple docstring''' lowercase = root.get_left() lowercase = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: lowercase = get_left_most(lowerCAmelCase__ ) root.set_data(lowerCAmelCase__ ) root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) ) elif left_child is not None: lowercase = left_child elif right_child is not None: lowercase = right_child else: return None elif root.get_data() > data: if left_child is None: print('''No such data''' ) return root else: root.set_left(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) ) if get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): lowercase = left_rotation(lowerCAmelCase__ ) else: lowercase = rl_rotation(lowerCAmelCase__ ) elif get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): lowercase = right_rotation(lowerCAmelCase__ ) else: lowercase = lr_rotation(lowerCAmelCase__ ) lowercase = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(lowerCAmelCase__ ) return root class lowercase : def __init__( self): lowercase = None def A__ ( self): return get_height(self.root) def A__ ( self ,A__): print('''insert:''' + str(A__)) lowercase = insert_node(self.root ,A__) def A__ ( self ,A__): print('''delete:''' + str(A__)) if self.root is None: print('''Tree is empty!''') return lowercase = del_node(self.root ,A__) def __str__( self ,): # a level traversale, gives a more intuitive look on the tree lowercase = '''''' lowercase = MyQueue() q.push(self.root) lowercase = self.get_height() if layer == 0: return output lowercase = 0 while not q.is_empty(): lowercase = q.pop() lowercase = ''' ''' * int(math.pow(2 ,layer - 1)) output += space if node is None: output += "*" q.push(A__) q.push(A__) else: output += str(node.get_data()) q.push(node.get_left()) q.push(node.get_right()) output += space lowercase = cnt + 1 for i in range(1_0_0): if cnt == math.pow(2 ,A__) - 1: lowercase = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def UpperCamelCase ( ): '''simple docstring''' import doctest doctest.testmod() if __name__ == "__main__": _test() lowercase__ :Union[str, Any] = AVLtree() lowercase__ :List[str] = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
101
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) SCREAMING_SNAKE_CASE_: Tuple =[] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val def lowerCAmelCase_ ( snake_case_ : int ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCAmelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) UpperCAmelCase_ = value else: UpperCAmelCase_ = value return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "" if is_panoptic: UpperCAmelCase_ = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:2_56, :] UpperCAmelCase_ = in_proj_bias[:2_56] UpperCAmelCase_ = in_proj_weight[2_56:5_12, :] UpperCAmelCase_ = in_proj_bias[2_56:5_12] UpperCAmelCase_ = in_proj_weight[-2_56:, :] UpperCAmelCase_ = in_proj_bias[-2_56:] def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCAmelCase_ = "resnet101" if "dc5" in model_name: UpperCAmelCase_ = True UpperCAmelCase_ = "panoptic" in model_name if is_panoptic: UpperCAmelCase_ = 2_50 else: UpperCAmelCase_ = 91 UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "coco-detection-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} # load image processor UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection" UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ ) # prepare image UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" ) UpperCAmelCase_ = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval() UpperCAmelCase_ = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCAmelCase_ = "conditional_detr." + src rename_key(snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ = rename_backbone_keys(snake_case_ ) # query, key and value matrices need special treatment read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val # finally, create HuggingFace model and load state dict UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ ) model.load_state_dict(snake_case_ ) model.eval() model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion UpperCAmelCase_ = conditional_detr(snake_case_ ) UpperCAmelCase_ = model(snake_case_ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
1
0
"""simple docstring""" import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification SCREAMING_SNAKE_CASE : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co SCREAMING_SNAKE_CASE : str = """main""" # Default branch name SCREAMING_SNAKE_CASE : str = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) SCREAMING_SNAKE_CASE : Union[str, Any] = """aaaaaaa""" # This commit does not exist, so we should 404. SCREAMING_SNAKE_CASE : str = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes SCREAMING_SNAKE_CASE : str = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def lowercase ( ) ->List[Any]: """simple docstring""" print('''Welcome!''' ) yield print('''Bye!''' ) @contextlib.contextmanager def lowercase ( ) ->Dict: """simple docstring""" print('''Bonjour!''' ) yield print('''Au revoir!''' ) class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' assert transformers.__spec__ is not None assert importlib.util.find_spec('''transformers''' ) is not None class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' with ContextManagers([] ): print('''Transformers are awesome!''' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' with ContextManagers([context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' ) @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO ) def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' with ContextManagers([context_fr(), context_en()] ): print('''Transformers are awesome!''' ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' ) @require_torch def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertEqual(find_labels(a_ ) , ['''labels'''] ) self.assertEqual(find_labels(a_ ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(a_ ) , ['''start_positions''', '''end_positions'''] ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' pass self.assertEqual(find_labels(a_ ) , ['''labels'''] ) @require_tf def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertEqual(find_labels(a_ ) , ['''labels'''] ) self.assertEqual(find_labels(a_ ) , ['''labels''', '''next_sentence_label'''] ) self.assertEqual(find_labels(a_ ) , ['''start_positions''', '''end_positions'''] ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' pass self.assertEqual(find_labels(a_ ) , ['''labels'''] ) @require_flax def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.assertEqual(find_labels(a_ ) , [] ) self.assertEqual(find_labels(a_ ) , [] ) self.assertEqual(find_labels(a_ ) , [] ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' pass self.assertEqual(find_labels(a_ ) , [] )
102
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): def __init__(self : int , *__a : Dict , **__a : str ): warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
1
0
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging A__ : Tuple = logging.get_logger(__name__) class __snake_case ( UpperCamelCase_ ): _a = ['''input_values''', '''attention_mask'''] def __init__( self : Any , A_ : int = 1 , A_ : int = 1_6_0_0_0 , A_ : float = 0.0 , A_ : bool = False , A_ : int = 8_0 , A_ : int = 1_6 , A_ : int = 6_4 , A_ : str = "hann_window" , A_ : float = 1.0 , A_ : float = 8_0 , A_ : float = 7_6_0_0 , A_ : float = 1e-10 , A_ : int = 2 , A_ : bool = True , **A_ : Union[str, Any] , ): super().__init__(feature_size=A_ , sampling_rate=A_ , padding_value=A_ , **A_) lowerCAmelCase_ : Union[str, Any] = do_normalize lowerCAmelCase_ : Dict = return_attention_mask lowerCAmelCase_ : Optional[Any] = num_mel_bins lowerCAmelCase_ : Union[str, Any] = hop_length lowerCAmelCase_ : List[str] = win_length lowerCAmelCase_ : Any = win_function lowerCAmelCase_ : Union[str, Any] = frame_signal_scale lowerCAmelCase_ : Optional[int] = fmin lowerCAmelCase_ : List[str] = fmax lowerCAmelCase_ : Optional[Any] = mel_floor lowerCAmelCase_ : List[Any] = reduction_factor lowerCAmelCase_ : Any = win_length * sampling_rate // 1_0_0_0 lowerCAmelCase_ : Union[str, Any] = hop_length * sampling_rate // 1_0_0_0 lowerCAmelCase_ : Optional[Any] = optimal_fft_length(self.sample_size) lowerCAmelCase_ : Tuple = (self.n_fft // 2) + 1 lowerCAmelCase_ : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=A_) lowerCAmelCase_ : str = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ) if frame_signal_scale != 1.0: warnings.warn( '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , A_ , ) if reduction_factor != 2.0: warnings.warn( '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , A_ , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCAmelCase__ ( A_ : List[np.ndarray] , A_ : List[np.ndarray] , A_ : float = 0.0): if attention_mask is not None: lowerCAmelCase_ : int = np.array(A_ , np.intaa) lowerCAmelCase_ : Tuple = [] for vector, length in zip(A_ , attention_mask.sum(-1)): lowerCAmelCase_ : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7) if length < normed_slice.shape[0]: lowerCAmelCase_ : Any = padding_value normed_input_values.append(A_) else: lowerCAmelCase_ : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values] return normed_input_values def UpperCAmelCase__ ( self : str , A_ : np.ndarray , ): lowerCAmelCase_ : Optional[int] = spectrogram( A_ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , ) return log_mel_spec.T def __call__( self : Tuple , A_ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , A_ : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , A_ : Union[bool, str, PaddingStrategy] = False , A_ : Optional[int] = None , A_ : bool = False , A_ : Optional[int] = None , A_ : Optional[bool] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Optional[int] = None , **A_ : str , ): if audio is None and audio_target is None: raise ValueError('''You must provide either `audio` or `audio_target` values.''') if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""") else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''') if audio is not None: lowerCAmelCase_ : Optional[int] = self._process_audio( A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ) else: lowerCAmelCase_ : Tuple = None if audio_target is not None: lowerCAmelCase_ : Tuple = self._process_audio( A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ , ) if inputs is None: return inputs_target else: lowerCAmelCase_ : Tuple = inputs_target['''input_values'''] lowerCAmelCase_ : List[Any] = inputs_target.get('''attention_mask''') if decoder_attention_mask is not None: lowerCAmelCase_ : int = decoder_attention_mask return inputs def UpperCAmelCase__ ( self : Optional[Any] , A_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , A_ : bool = False , A_ : Union[bool, str, PaddingStrategy] = False , A_ : Optional[int] = None , A_ : bool = False , A_ : Optional[int] = None , A_ : Optional[bool] = None , A_ : Optional[Union[str, TensorType]] = None , **A_ : Union[str, Any] , ): lowerCAmelCase_ : str = isinstance(A_ , np.ndarray) and len(speech.shape) > 1 if is_batched_numpy and len(speech.shape) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""") lowerCAmelCase_ : List[str] = is_batched_numpy or ( isinstance(A_ , (list, tuple)) and (isinstance(speech[0] , (np.ndarray, tuple, list))) ) if is_batched: lowerCAmelCase_ : str = [np.asarray(A_ , dtype=np.floataa) for speech in speech] elif not is_batched and not isinstance(A_ , np.ndarray): lowerCAmelCase_ : int = np.asarray(A_ , dtype=np.floataa) elif isinstance(A_ , np.ndarray) and speech.dtype is np.dtype(np.floataa): lowerCAmelCase_ : List[str] = speech.astype(np.floataa) # always return batch if not is_batched: lowerCAmelCase_ : Union[str, Any] = [speech] # needed to make pad() work on spectrogram inputs lowerCAmelCase_ : str = self.feature_size # convert into correct format for padding if is_target: lowerCAmelCase_ : Optional[Any] = [self._extract_mel_features(A_) for waveform in speech] lowerCAmelCase_ : Dict = BatchFeature({'''input_values''': features}) lowerCAmelCase_ : Union[str, Any] = self.num_mel_bins else: lowerCAmelCase_ : int = BatchFeature({'''input_values''': speech}) lowerCAmelCase_ : Tuple = self.pad( A_ , padding=A_ , max_length=A_ , truncation=A_ , pad_to_multiple_of=A_ , return_attention_mask=A_ , **A_ , ) lowerCAmelCase_ : Tuple = feature_size_hack # convert input values to correct format lowerCAmelCase_ : List[str] = padded_inputs['''input_values'''] if not isinstance(input_values[0] , np.ndarray): lowerCAmelCase_ : List[str] = [np.asarray(A_ , dtype=np.floataa) for array in input_values] elif ( not isinstance(A_ , np.ndarray) and isinstance(input_values[0] , np.ndarray) and input_values[0].dtype is np.dtype(np.floataa) ): lowerCAmelCase_ : Any = [array.astype(np.floataa) for array in input_values] elif isinstance(A_ , np.ndarray) and input_values.dtype is np.dtype(np.floataa): lowerCAmelCase_ : Dict = input_values.astype(np.floataa) # convert attention_mask to correct format lowerCAmelCase_ : List[Any] = padded_inputs.get('''attention_mask''') if attention_mask is not None: lowerCAmelCase_ : Optional[Any] = [np.asarray(A_ , dtype=np.intaa) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: lowerCAmelCase_ : Tuple = ( attention_mask if self._get_padding_strategies(A_ , max_length=A_) is not PaddingStrategy.DO_NOT_PAD else None ) lowerCAmelCase_ : str = self.zero_mean_unit_var_norm( padded_inputs['''input_values'''] , attention_mask=A_ , padding_value=self.padding_value) if return_tensors is not None: lowerCAmelCase_ : Dict = padded_inputs.convert_to_tensors(A_) return padded_inputs def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : Any = super().to_dict() # Don't serialize these as they are derived from the other properties. lowerCAmelCase_ : List[Any] = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs'''] for name in names: if name in output: del output[name] return output
103
'''simple docstring''' from __future__ import annotations import queue class __A : def __init__(self : Optional[Any] , __a : str ): UpperCAmelCase_ = data UpperCAmelCase_ = None UpperCAmelCase_ = None def lowerCAmelCase_ ( ) -> TreeNode: '''simple docstring''' print("\n********Press N to stop entering at any point of time********\n" ) UpperCAmelCase_ = input("Enter the value of the root node: " ).strip().lower() UpperCAmelCase_ = queue.Queue() UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() UpperCAmelCase_ = f"""Enter the left node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = left_node q.put(snake_case_ ) UpperCAmelCase_ = f"""Enter the right node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = right_node q.put(snake_case_ ) raise def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = [] while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(snake_case_ ) UpperCAmelCase_ = n.left # end of while means current node doesn't have left child UpperCAmelCase_ = stack.pop() # start to traverse its right child UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: stack.append(snake_case_ ) UpperCAmelCase_ = n.left UpperCAmelCase_ = stack.pop() print(n.data , end="," ) UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ , UpperCAmelCase_ = [], [] UpperCAmelCase_ = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 UpperCAmelCase_ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def lowerCAmelCase_ ( snake_case_ : str = "" , snake_case_ : Any=50 , snake_case_ : Union[str, Any]="*" ) -> str: '''simple docstring''' if not s: return "\n" + width * char UpperCAmelCase_ , UpperCAmelCase_ = divmod(width - len(snake_case_ ) - 2 , 2 ) return f"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) SCREAMING_SNAKE_CASE_: TreeNode =build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
1
0
'''simple docstring''' from __future__ import annotations import time import numpy as np lowerCAmelCase__ = [8, 5, 9, 7] lowerCAmelCase__ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] lowerCAmelCase__ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class lowercase_ : """simple docstring""" def __init__( self : Optional[Any] ,lowercase__ : list[int] ,lowercase__ : list[list[int]] ,lowercase__ : list[list[int]] ,): __lowercase = claim_vector __lowercase = allocated_resources_table __lowercase = maximum_claim_table def SCREAMING_SNAKE_CASE ( self : Tuple ): return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def SCREAMING_SNAKE_CASE ( self : str ): return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(lowercase__ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def SCREAMING_SNAKE_CASE ( self : Any ): return {self.__need().index(lowercase__ ): i for i in self.__need()} def SCREAMING_SNAKE_CASE ( self : List[str] ,**lowercase__ : List[Any] ): __lowercase = self.__need() __lowercase = self.__allocated_resources_table __lowercase = self.__available_resources() __lowercase = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 5_0 + '''\n''' ) while need_list: __lowercase = False for each_need in need_list: __lowercase = True for index, need in enumerate(lowercase__ ): if need > available_resources[index]: __lowercase = False break if execution: __lowercase = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __lowercase = original_need_index print(F"Process {process_number + 1} is executing." ) # remove the process run from stack need_list.remove(lowercase__ ) # update available/freed resources stack __lowercase = np.array(lowercase__ ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(lowercase__ ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def SCREAMING_SNAKE_CASE ( self : Optional[int] ): print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( F"P{self.__allocated_resources_table.index(lowercase__ ) + 1}" + ''' '''.join(F"{it:>8}" for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( F"P{self.__maximum_claim_table.index(lowercase__ ) + 1}" + ''' '''.join(F"{it:>8}" for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(lowercase__ ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(lowercase__ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
104
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) @add_end_docstrings( UpperCamelCase__ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class __A ( UpperCamelCase__ ): def _lowercase (self : str , __a : GenericTensor ): if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ) else: raise ValueError("Unsupported framework" ) return masked_index def _lowercase (self : Tuple , __a : GenericTensor ): UpperCAmelCase_ = self.get_masked_index(__a ) UpperCAmelCase_ = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def _lowercase (self : List[Any] , __a : GenericTensor ): if isinstance(__a , __a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__a ) def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ): if return_tensors is None: UpperCAmelCase_ = self.framework UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a ) self.ensure_exactly_one_mask_token(__a ) return model_inputs def _lowercase (self : str , __a : Optional[int] ): UpperCAmelCase_ = self.model(**__a ) UpperCAmelCase_ = model_inputs["input_ids"] return model_outputs def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: UpperCAmelCase_ = target_ids.shape[0] UpperCAmelCase_ = model_outputs["input_ids"][0] UpperCAmelCase_ = model_outputs["logits"] if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] UpperCAmelCase_ = outputs.numpy() UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = stable_softmax(__a , axis=-1 ) if target_ids is not None: UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) ) UpperCAmelCase_ = tf.expand_dims(__a , 0 ) UpperCAmelCase_ = tf.math.top_k(__a , k=__a ) UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy() else: UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = logits.softmax(dim=-1 ) if target_ids is not None: UpperCAmelCase_ = probs[..., target_ids] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a ) UpperCAmelCase_ = [] UpperCAmelCase_ = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): UpperCAmelCase_ = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place UpperCAmelCase_ = input_ids.numpy().copy() if target_ids is not None: UpperCAmelCase_ = target_ids[p].tolist() UpperCAmelCase_ = p # Filter padding out: UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a ) UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(__a ) result.append(__a ) if single_mask: return result[0] return result def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ): if isinstance(__a , __a ): UpperCAmelCase_ = [targets] try: UpperCAmelCase_ = self.tokenizer.get_vocab() except Exception: UpperCAmelCase_ = {} UpperCAmelCase_ = [] for target in targets: UpperCAmelCase_ = vocab.get(__a , __a ) if id_ is None: UpperCAmelCase_ = self.tokenizer( __a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"] if len(__a ) == 0: logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ "We cannot replace it with anything meaningful, ignoring it" ) continue UpperCAmelCase_ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) UpperCAmelCase_ = list(set(__a ) ) if len(__a ) == 0: raise ValueError("At least one target must be provided when passed." ) UpperCAmelCase_ = np.array(__a ) return target_ids def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ): UpperCAmelCase_ = {} if targets is not None: UpperCAmelCase_ = self.get_target_ids(__a , __a ) UpperCAmelCase_ = target_ids if top_k is not None: UpperCAmelCase_ = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ): UpperCAmelCase_ = super().__call__(__a , **__a ) if isinstance(__a , __a ) and len(__a ) == 1: return outputs[0] return outputs
1
0
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Tuple =DownBlockaD # noqa F405 lowerCamelCase : Dict ="""down""" def __a ( self ) -> Union[str, Any]: a : List[Any] = [-0.0_232, -0.9_869, 0.8_054, -0.0_637, -0.1_688, -1.4_264, 0.4_470, -1.3_394, 0.0_904] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : int =ResnetDownsampleBlockaD # noqa F405 lowerCamelCase : str ="""down""" def __a ( self ) -> List[str]: a : Union[str, Any] = [0.0_710, 0.2_410, -0.7_320, -1.0_757, -1.1_343, 0.3_540, -0.0_133, -0.2_576, 0.0_948] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[int] =AttnDownBlockaD # noqa F405 lowerCamelCase : Optional[int] ="""down""" def __a ( self ) -> List[str]: a : str = [0.0_636, 0.8_964, -0.6_234, -1.0_131, 0.0_844, 0.4_935, 0.3_437, 0.0_911, -0.2_957] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Dict =CrossAttnDownBlockaD # noqa F405 lowerCamelCase : Any ="""down""" def __a ( self ) -> Any: a, a : Optional[int] = super().prepare_init_args_and_inputs_for_common() a : Optional[Any] = 32 return init_dict, inputs_dict def __a ( self ) -> List[str]: a : Optional[Any] = [0.2_238, -0.7_396, -0.2_255, -0.3_829, 0.1_925, 1.1_665, 0.0_603, -0.7_295, 0.1_983] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[int] =SimpleCrossAttnDownBlockaD # noqa F405 lowerCamelCase : int ="""down""" @property def __a ( self ) -> List[str]: return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ ) def __a ( self ) -> Any: a, a : Optional[int] = super().prepare_init_args_and_inputs_for_common() a : str = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def __a ( self ) -> Dict: a : Union[str, Any] = [0.7_921, -0.0_992, -0.1_962, -0.7_695, -0.4_242, 0.7_804, 0.4_737, 0.2_765, 0.3_338] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : int =SkipDownBlockaD # noqa F405 lowerCamelCase : Optional[int] ="""down""" @property def __a ( self ) -> Any: return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ ) def __a ( self ) -> Dict: a : Any = [-0.0_845, -0.2_087, -0.2_465, 0.0_971, 0.1_900, -0.0_484, 0.2_664, 0.4_179, 0.5_069] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[Any] =AttnSkipDownBlockaD # noqa F405 lowerCamelCase : Tuple ="""down""" @property def __a ( self ) -> Union[str, Any]: return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: a : Optional[Any] = [0.5_539, 0.1_609, 0.4_924, 0.0_537, -0.1_995, 0.4_050, 0.0_979, -0.2_721, -0.0_642] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[str] =DownEncoderBlockaD # noqa F405 lowerCamelCase : Optional[Any] ="""down""" @property def __a ( self ) -> int: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> Union[str, Any]: a : Dict = { "in_channels": 32, "out_channels": 32, } a : Any = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> List[str]: a : List[str] = [1.1_102, 0.5_302, 0.4_872, -0.0_023, -0.8_042, 0.0_483, -0.3_489, -0.5_632, 0.7_626] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[Any] =AttnDownEncoderBlockaD # noqa F405 lowerCamelCase : Optional[Any] ="""down""" @property def __a ( self ) -> Any: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> Optional[int]: a : Union[str, Any] = { "in_channels": 32, "out_channels": 32, } a : Union[str, Any] = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> str: a : List[Any] = [0.8_966, -0.1_486, 0.8_568, 0.8_141, -0.9_046, -0.1_342, -0.0_972, -0.7_417, 0.1_538] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Dict =UNetMidBlockaD # noqa F405 lowerCamelCase : Tuple ="""mid""" def __a ( self ) -> str: a : Optional[Any] = { "in_channels": 32, "temb_channels": 128, } a : Union[str, Any] = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> int: a : Optional[Any] = [-0.1_062, 1.7_248, 0.3_494, 1.4_569, -0.0_910, -1.2_421, -0.9_984, 0.6_736, 1.0_028] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[int] =UNetMidBlockaDCrossAttn # noqa F405 lowerCamelCase : Optional[int] ="""mid""" def __a ( self ) -> Union[str, Any]: a, a : Dict = super().prepare_init_args_and_inputs_for_common() a : Tuple = 32 return init_dict, inputs_dict def __a ( self ) -> Optional[Any]: a : str = [0.0_187, 2.4_220, 0.4_484, 1.1_203, -0.6_121, -1.5_122, -0.8_270, 0.7_851, 1.8_335] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Optional[int] =UNetMidBlockaDSimpleCrossAttn # noqa F405 lowerCamelCase : str ="""mid""" @property def __a ( self ) -> int: return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ ) def __a ( self ) -> Dict: a, a : Union[str, Any] = super().prepare_init_args_and_inputs_for_common() a : Tuple = 32 return init_dict, inputs_dict def __a ( self ) -> Union[str, Any]: a : str = [0.7_143, 1.9_974, 0.5_448, 1.3_977, 0.1_282, -1.1_237, -1.4_238, 0.5_530, 0.8_880] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Tuple =UpBlockaD # noqa F405 lowerCamelCase : int ="""up""" @property def __a ( self ) -> List[str]: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> str: a : Optional[int] = [-0.2_041, -0.4_165, -0.3_022, 0.0_041, -0.6_628, -0.7_053, 0.1_928, -0.0_325, 0.0_523] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Dict =ResnetUpsampleBlockaD # noqa F405 lowerCamelCase : Any ="""up""" @property def __a ( self ) -> Any: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> Dict: a : int = [0.2_287, 0.3_549, -0.1_346, 0.4_797, -0.1_715, -0.9_649, 0.7_305, -0.5_864, -0.6_244] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : int =CrossAttnUpBlockaD # noqa F405 lowerCamelCase : Optional[int] ="""up""" @property def __a ( self ) -> Any: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> List[Any]: a, a : Any = super().prepare_init_args_and_inputs_for_common() a : int = 32 return init_dict, inputs_dict def __a ( self ) -> int: a : List[str] = [-0.1_403, -0.3_515, -0.0_420, -0.1_425, 0.3_167, 0.5_094, -0.2_181, 0.5_931, 0.5_582] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Any =SimpleCrossAttnUpBlockaD # noqa F405 lowerCamelCase : Any ="""up""" @property def __a ( self ) -> str: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ , include_encoder_hidden_states=lowerCAmelCase__ ) def __a ( self ) -> Dict: a, a : str = super().prepare_init_args_and_inputs_for_common() a : List[str] = 32 return init_dict, inputs_dict def __a ( self ) -> Optional[Any]: a : Dict = [0.2_645, 0.1_480, 0.0_909, 0.8_044, -0.9_758, -0.9_083, 0.0_994, -1.1_453, -0.7_402] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Any =AttnUpBlockaD # noqa F405 lowerCamelCase : int ="""up""" @property def __a ( self ) -> Dict: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) @unittest.skipIf(torch_device == "mps" , "MPS result is not consistent" ) def __a ( self ) -> Optional[Any]: a : Any = [0.0_979, 0.1_326, 0.0_021, 0.0_659, 0.2_249, 0.0_059, 0.1_132, 0.5_952, 0.1_033] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Union[str, Any] =SkipUpBlockaD # noqa F405 lowerCamelCase : int ="""up""" @property def __a ( self ) -> List[Any]: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: a : Optional[Any] = [-0.0_893, -0.1_234, -0.1_506, -0.0_332, 0.0_123, -0.0_211, 0.0_566, 0.0_143, 0.0_362] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[Any] =AttnSkipUpBlockaD # noqa F405 lowerCamelCase : Dict ="""up""" @property def __a ( self ) -> str: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def __a ( self ) -> Dict: a : Optional[int] = [0.0_361, 0.0_617, 0.2_787, -0.0_350, 0.0_342, 0.3_421, -0.0_843, 0.0_913, 0.3_015] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : Any =UpDecoderBlockaD # noqa F405 lowerCamelCase : Optional[int] ="""up""" @property def __a ( self ) -> List[str]: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> List[str]: a : Optional[int] = {"in_channels": 32, "out_channels": 32} a : Any = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> Union[str, Any]: a : Union[str, Any] = [0.4_404, 0.1_998, -0.9_886, -0.3_320, -0.3_128, -0.7_034, -0.6_955, -0.2_338, -0.3_137] super().test_output(lowerCAmelCase__ ) class __UpperCamelCase ( a__ , unittest.TestCase ): lowerCamelCase : List[str] =AttnUpDecoderBlockaD # noqa F405 lowerCamelCase : List[Any] ="""up""" @property def __a ( self ) -> Optional[int]: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def __a ( self ) -> Tuple: a : List[Any] = {"in_channels": 32, "out_channels": 32} a : int = self.dummy_input return init_dict, inputs_dict def __a ( self ) -> Optional[Any]: a : Any = [0.6_738, 0.4_491, 0.1_055, 1.0_710, 0.7_316, 0.3_339, 0.3_352, 0.1_023, 0.3_568] super().test_output(lowerCAmelCase__ )
105
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__) @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : str a__ : str a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : List[int] a__ : Optional[List[int]] = None a__ : Optional[List[int]] = None a__ : Optional[Union[int, float]] = None a__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __A ( UpperCamelCase__ ): a__ : List[InputFeatures] def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = os.path.join( __a , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , ) UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCAmelCase_ = cached_features_file + ".lock" with FileLock(__a ): if os.path.exists(__a ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCAmelCase_ = torch.load(__a ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCAmelCase_ = ( processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) ) logger.info("Training examples: %s" , len(__a ) ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) logger.info("Saving features into cached file %s" , __a ) torch.save(self.features , __a ) def __len__(self : List[Any] ): return len(self.features ) def __getitem__(self : Any , __a : Optional[Any] ): return self.features[i] def _lowercase (self : Union[str, Any] ): return self.label_list if is_tf_available(): import tensorflow as tf class __A : a__ : List[InputFeatures] def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCAmelCase_ = tf.data.Dataset.from_generator( __a , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowercase (self : int ): return self.dataset def __len__(self : Any ): return len(self.features ) def __getitem__(self : int , __a : Union[str, Any] ): return self.features[i] def _lowercase (self : int ): return self.label_list class __A ( UpperCamelCase__ ): def _lowercase (self : List[Any] , __a : Dict ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" ) def _lowercase (self : Any , __a : List[Any] ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" ) def _lowercase (self : Any ): return ["contradiction", "entailment", "neutral"] def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ): UpperCAmelCase_ = [] for i, line in enumerate(__a ): if i == 0: continue UpperCAmelCase_ = "%s-%s" % (set_type, line[0]) UpperCAmelCase_ = line[5] UpperCAmelCase_ = line[6] UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7] UpperCAmelCase_ = line[0] examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) ) return examples def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )} UpperCAmelCase_ = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d" % (ex_index) ) UpperCAmelCase_ = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0 UpperCAmelCase_ = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE_: int ={ 'hans': 3, } SCREAMING_SNAKE_CASE_: Any ={ 'hans': HansProcessor, }
1
0
"""simple docstring""" __UpperCamelCase : Dict = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' __UpperCamelCase : Any = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] __UpperCamelCase : Dict = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
106
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Tuple ={} class __A ( UpperCamelCase__ ): a__ : int = """llama""" a__ : Any = ["""past_key_values"""] def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ): UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = hidden_size UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads # for backward compatibility if num_key_value_heads is None: UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = num_key_value_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = initializer_range UpperCAmelCase_ = rms_norm_eps UpperCAmelCase_ = pretraining_tp UpperCAmelCase_ = use_cache UpperCAmelCase_ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def _lowercase (self : List[str] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f"""got {self.rope_scaling}""" ) UpperCAmelCase_ = self.rope_scaling.get("type" , __a ) UpperCAmelCase_ = self.rope_scaling.get("factor" , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
1
0
import math import unittest def __magic_name__ ( A : int ): '''simple docstring''' assert isinstance(A, A ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(A ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]: self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(11 ) ) self.assertTrue(is_prime(13 ) ) self.assertTrue(is_prime(17 ) ) self.assertTrue(is_prime(19 ) ) self.assertTrue(is_prime(23 ) ) self.assertTrue(is_prime(29 ) ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: with self.assertRaises(__lowerCamelCase ): is_prime(-19 ) self.assertFalse( is_prime(0 ) , "Zero doesn't have any positive factors, primes must have exactly two." , ) self.assertFalse( is_prime(1 ) , "One only has 1 positive factor, primes must have exactly two." , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
107
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def _lowercase (self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase (self : str ): UpperCAmelCase_ = 1 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def _lowercase (self : int ): torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _lowercase (self : Any ): torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _lowercase (self : Optional[Any] ): torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__a ) def _lowercase (self : Any ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] UpperCAmelCase_ = image[0, -3:, -3:, -1] UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase (self : Optional[int] ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _lowercase (self : str ): UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 UpperCAmelCase_ = unet.half() UpperCAmelCase_ = text_encoder.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : List[Any] ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def _lowercase (self : Tuple ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _lowercase (self : List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) UpperCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
1
0
"""simple docstring""" import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def a__ ( SCREAMING_SNAKE_CASE : int = 8 ): '''simple docstring''' lowerCAmelCase : List[str] = ascii_letters + digits + punctuation return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) ) def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' i -= len(SCREAMING_SNAKE_CASE ) lowerCAmelCase : Any = i // 3 lowerCAmelCase : List[Any] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) lowerCAmelCase : Union[str, Any] = ( chars_incl + random(SCREAMING_SNAKE_CASE , quotient + remainder ) + random(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) + random(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) lowerCAmelCase : List[Any] = list(SCREAMING_SNAKE_CASE ) shuffle(SCREAMING_SNAKE_CASE ) return "".join(SCREAMING_SNAKE_CASE ) # random is a generalised function for letters, characters and numbers def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return "".join(secrets.choice(SCREAMING_SNAKE_CASE ) for _ in range(SCREAMING_SNAKE_CASE ) ) def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' pass # Put your code here... def a__ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' pass # Put your code here... def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' pass # Put your code here... def a__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 8 ): '''simple docstring''' if len(SCREAMING_SNAKE_CASE ) < min_length: # Your Password must be at least 8 characters long return False lowerCAmelCase : Optional[Any] = any(char in ascii_uppercase for char in password ) lowerCAmelCase : List[str] = any(char in ascii_lowercase for char in password ) lowerCAmelCase : Optional[int] = any(char in digits for char in password ) lowerCAmelCase : Union[str, Any] = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def a__ ( ): '''simple docstring''' lowerCAmelCase : int = int(input("Please indicate the max length of your password: " ).strip() ) lowerCAmelCase : int = input( "Please indicate the characters that must be in your password: " ).strip() print("Password generated:" , password_generator(SCREAMING_SNAKE_CASE ) ) print( "Alternative Password generated:" , alternative_password_generator(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , ) print("[If you are thinking of using this passsword, You better save it.]" ) if __name__ == "__main__": main()
108
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __A ( UpperCamelCase__ ): def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ): UpperCAmelCase_ = 1.0 if scale is None else scale UpperCAmelCase_ = 0.0 if loc is None else loc super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] ) @property def _lowercase (self : Union[str, Any] ): return self.base_dist.mean * self.scale + self.loc @property def _lowercase (self : List[Any] ): return self.base_dist.variance * self.scale**2 @property def _lowercase (self : List[Any] ): return self.variance.sqrt() class __A ( nn.Module ): def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ): super().__init__(**__a ) UpperCAmelCase_ = args_dim UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] ) UpperCAmelCase_ = domain_map def _lowercase (self : List[str] , __a : torch.Tensor ): UpperCAmelCase_ = [proj(__a ) for proj in self.proj] return self.domain_map(*__a ) class __A ( nn.Module ): def __init__(self : Union[str, Any] , __a : List[str] ): super().__init__() UpperCAmelCase_ = function def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ): return self.function(__a , *__a ) class __A : a__ : type a__ : int a__ : Dict[str, int] def __init__(self : List[Any] , __a : int = 1 ): UpperCAmelCase_ = dim UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim} def _lowercase (self : Any , __a : Any ): if self.dim == 1: return self.distribution_class(*__a ) else: return Independent(self.distribution_class(*__a ) , 1 ) def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ): UpperCAmelCase_ = self._base_distribution(__a ) if loc is None and scale is None: return distr else: return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim ) @property def _lowercase (self : Any ): return () if self.dim == 1 else (self.dim,) @property def _lowercase (self : Dict ): return len(self.event_shape ) @property def _lowercase (self : Tuple ): return 0.0 def _lowercase (self : List[str] , __a : int ): return ParameterProjection( in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _lowercase (self : Optional[int] , *__a : torch.Tensor ): raise NotImplementedError() @staticmethod def _lowercase (__a : torch.Tensor ): return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0 class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} a__ : type = StudentT @classmethod def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCAmelCase_ = 2.0 + cls.squareplus(__a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"loc": 1, "scale": 1} a__ : type = Normal @classmethod def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"total_count": 1, "logits": 1} a__ : type = NegativeBinomial @classmethod def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _lowercase (self : List[str] , __a : str ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if self.dim == 1: return self.distribution_class(total_count=__a , logits=__a ) else: return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 ) def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
1
0
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=0.6 , _SCREAMING_SNAKE_CASE=None , ) -> Tuple: '''simple docstring''' UpperCAmelCase : Union[str, Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : List[str] = image_size UpperCAmelCase : Tuple = patch_size UpperCAmelCase : Union[str, Any] = num_channels UpperCAmelCase : int = is_training UpperCAmelCase : str = use_labels UpperCAmelCase : List[Any] = hidden_size UpperCAmelCase : Optional[int] = num_hidden_layers UpperCAmelCase : List[str] = num_attention_heads UpperCAmelCase : Dict = intermediate_size UpperCAmelCase : Optional[int] = hidden_act UpperCAmelCase : Union[str, Any] = hidden_dropout_prob UpperCAmelCase : Optional[int] = attention_probs_dropout_prob UpperCAmelCase : str = type_sequence_label_size UpperCAmelCase : Dict = initializer_range UpperCAmelCase : Dict = mask_ratio UpperCAmelCase : Union[str, Any] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCAmelCase : Dict = (image_size // patch_size) ** 2 UpperCAmelCase : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : str = None if self.use_labels: UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : List[str] = TFViTMAEModel(config=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' UpperCAmelCase : Tuple = TFViTMAEForPreTraining(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) # expected sequence length = num_patches UpperCAmelCase : Optional[Any] = (self.image_size // self.patch_size) ** 2 UpperCAmelCase : Union[str, Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCAmelCase : List[Any] = 1 UpperCAmelCase : int = TFViTMAEForPreTraining(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , training=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : Any = self.prepare_config_and_inputs() ((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) : Tuple = config_and_inputs UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Dict = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __lowerCAmelCase : List[Any] = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} __lowerCAmelCase : str = False __lowerCAmelCase : Optional[int] = False __lowerCAmelCase : List[Any] = False __lowerCAmelCase : Tuple = False def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] = TFViTMAEModelTester(self ) UpperCAmelCase : List[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCAmelCase : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Optional[int] = [*signature.parameters.keys()] UpperCAmelCase : Dict = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = copy.deepcopy(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : str = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = outputs_dict[0].numpy() UpperCAmelCase : Dict = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_SCREAMING_SNAKE_CASE ): UpperCAmelCase : Optional[Any] = {} for k, v in inputs_dict.items(): if tf.is_tensor(_SCREAMING_SNAKE_CASE ): UpperCAmelCase : int = v.numpy() else: UpperCAmelCase : str = np.array(_SCREAMING_SNAKE_CASE ) return inputs_np_dict for model_class in self.all_model_classes: UpperCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = prepare_numpy_arrays(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' np.random.seed(2 ) UpperCAmelCase : List[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCAmelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : List[str] = tf.constant(_SCREAMING_SNAKE_CASE ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCAmelCase : Any = tf_noise super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Union[str, Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_SCREAMING_SNAKE_CASE ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ),) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_SCREAMING_SNAKE_CASE , """_keras_serializable""" , _SCREAMING_SNAKE_CASE ) } UpperCAmelCase : List[Any] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCAmelCase : str = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: UpperCAmelCase : List[Any] = main_layer_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCAmelCase : str = tf.keras.Model(_SCREAMING_SNAKE_CASE , outputs=main_layer(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = os.path.join(_SCREAMING_SNAKE_CASE , """keras_model.h5""" ) model.save(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = tf.keras.models.load_model( _SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_SCREAMING_SNAKE_CASE , tf.keras.Model ) UpperCAmelCase : int = model(_SCREAMING_SNAKE_CASE ) self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Dict = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : int = outputs.last_hidden_state.numpy() UpperCAmelCase : int = 0 else: UpperCAmelCase : Optional[Any] = outputs.logits.numpy() UpperCAmelCase : int = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = model_class.from_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) if model_class.__name__ == "TFViTMAEModel": UpperCAmelCase : Tuple = after_outputs["""last_hidden_state"""].numpy() UpperCAmelCase : Optional[int] = 0 else: UpperCAmelCase : str = after_outputs["""logits"""].numpy() UpperCAmelCase : Dict = 0 UpperCAmelCase : List[Any] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-5 ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' np.random.seed(2 ) UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[str] = int((config.image_size // config.patch_size) ** 2 ) UpperCAmelCase : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCAmelCase : List[Any] = model_class.from_config(model.config ) UpperCAmelCase : List[Any] = new_model(_SCREAMING_SNAKE_CASE ) # Build model new_model.set_weights(model.get_weights() ) UpperCAmelCase : Union[str, Any] = new_model(_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) self.assert_outputs_same(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' pass @slow def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[str] = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def _snake_case ( ): UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' np.random.seed(2 ) UpperCAmelCase : Optional[int] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Optional[int] = prepare_img() UpperCAmelCase : int = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCAmelCase : int = ViTMAEConfig() UpperCAmelCase : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCAmelCase : Any = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCAmelCase : str = model(**_SCREAMING_SNAKE_CASE , noise=_SCREAMING_SNAKE_CASE ) # verify the logits UpperCAmelCase : List[str] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
109
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def _lowercase (self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ): UpperCAmelCase_ = 0.0 for i, j in zip(__a , __a ): n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0 UpperCAmelCase_ = n_correct / len(__a ) return { "accuracy": accuracy, }
1
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
110
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]: '''simple docstring''' model.train() UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict: '''simple docstring''' set_seed(42 ) UpperCAmelCase_ = RegressionModel() UpperCAmelCase_ = deepcopy(snake_case_ ) UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( snake_case_ : Any ) -> int: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] GradientState._reset_state() def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ )) if accelerator.num_processes > 1: check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ = RegressionDataset(length=96 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if iteration < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if batch_num < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(snake_case_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(snake_case_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(snake_case_ , snake_case_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Dict ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
1
0
"""simple docstring""" from __future__ import annotations import math def __lowercase ( snake_case_ : int ) ->list[int]: '''simple docstring''' if num <= 0: __A : Dict = F"""{num}: Invalid input, please enter a positive integer.""" raise ValueError(snake_case_ ) __A : Union[str, Any] = [True] * (num + 1) __A : int = [] __A : int = 2 __A : str = int(math.sqrt(snake_case_ ) ) while start <= end: # If start is a prime if sieve[start] is True: prime.append(snake_case_ ) # Set multiples of start be False for i in range(start * start ,num + 1 ,snake_case_ ): if sieve[i] is True: __A : str = False start += 1 for j in range(end + 1 ,num + 1 ): if sieve[j] is True: prime.append(snake_case_ ) return prime if __name__ == "__main__": print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
179
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(snake_case_ , x % y ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int: '''simple docstring''' UpperCAmelCase_ = 1 for i in range(1 , n + 1 ): UpperCAmelCase_ = lcm(snake_case_ , snake_case_ ) return g if __name__ == "__main__": print(f"{solution() = }")
1
0
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def __snake_case ( _lowerCAmelCase : List[str] ) -> List[str]: monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def __snake_case ( _lowerCAmelCase : Optional[Any] ) -> Optional[Any]: class __magic_name__ : """simple docstring""" def __init__( self :Dict , snake_case :Any ): '''simple docstring''' A_ : List[Any] = metric_id class __magic_name__ : """simple docstring""" __UpperCamelCase = [MetricMock(UpperCamelCase__ ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]] def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def __snake_case ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple ) -> Optional[int]: if "tmp_path" in args: A_ : int = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(snake_case_ , match="https://huggingface.co/docs/evaluate" ): func(*snake_case_ )
300
'''simple docstring''' import os from math import logaa def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int: '''simple docstring''' UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ): UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) ) if x * logaa(snake_case_ ) > largest: UpperCAmelCase_ = x * logaa(snake_case_ ) UpperCAmelCase_ = i + 1 return result if __name__ == "__main__": print(solution())
1
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class __magic_name__ : def __init__( self : Any , lowercase_ : str , lowercase_ : List[str]=13 , lowercase_ : Optional[int]=30 , lowercase_ : Tuple=2 , lowercase_ : str=3 , lowercase_ : Tuple=True , lowercase_ : List[Any]=True , lowercase_ : Optional[int]=32 , lowercase_ : Optional[int]=2 , lowercase_ : int=4 , lowercase_ : Optional[Any]=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : int=0.1 , lowercase_ : int=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : Dict=3 , lowercase_ : Optional[int]=None , lowercase_ : List[str]=2 , ): lowercase_ : Optional[int] = parent lowercase_ : Optional[Any] = batch_size lowercase_ : List[str] = image_size lowercase_ : Any = patch_size lowercase_ : List[str] = num_channels lowercase_ : Dict = is_training lowercase_ : Union[str, Any] = use_labels lowercase_ : List[str] = hidden_size lowercase_ : Tuple = num_hidden_layers lowercase_ : Union[str, Any] = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : Tuple = hidden_act lowercase_ : List[str] = hidden_dropout_prob lowercase_ : Any = attention_probs_dropout_prob lowercase_ : Tuple = type_sequence_label_size lowercase_ : str = initializer_range lowercase_ : str = scope lowercase_ : Optional[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase_ : Tuple = (image_size // patch_size) ** 2 lowercase_ : Optional[Any] = num_patches + 2 def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Optional[int] = None if self.use_labels: lowercase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : List[str] ): return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Any , lowercase_ : Dict ): lowercase_ : Tuple = TFDeiTModel(config=__a ) lowercase_ : str = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Tuple ): lowercase_ : Union[str, Any] = TFDeiTForMaskedImageModeling(config=__a ) lowercase_ : Optional[Any] = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase_ : Optional[int] = 1 lowercase_ : Union[str, Any] = TFDeiTForMaskedImageModeling(__a ) lowercase_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ : str = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : int ): lowercase_ : List[Any] = self.type_sequence_label_size lowercase_ : Tuple = TFDeiTForImageClassification(__a ) lowercase_ : Optional[int] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase_ : int = 1 lowercase_ : Dict = TFDeiTForImageClassification(__a ) lowercase_ : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ : Dict = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[Any] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = config_and_inputs lowercase_ : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class __magic_name__ ( UpperCamelCase__, UpperCamelCase__, unittest.TestCase): UpperCamelCase__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) UpperCamelCase__ = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : Tuple = TFDeiTModelTester(self ) lowercase_ : List[str] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : int = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowercase_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , tf.keras.layers.Dense ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = model_class(__a ) lowercase_ : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : List[str] = [*signature.parameters.keys()] lowercase_ : str = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def SCREAMING_SNAKE_CASE_ ( self : Any ): lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Union[str, Any]=False ): lowercase_ : Optional[Any] = super()._prepare_for_class(__a , __a , return_labels=__a ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def SCREAMING_SNAKE_CASE_ ( self : int ): for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : str = TFDeiTModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCamelCase ( ) -> Dict: lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class __magic_name__ ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : str = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : List[str] = prepare_img() lowercase_ : Any = image_processor(images=__a , return_tensors="""tf""" ) # forward pass lowercase_ : int = model(**__a ) # verify the logits lowercase_ : int = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) lowercase_ : int = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
239
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = checkpoint UpperCAmelCase_ = {} UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ ) } for i in range(snake_case_ ): UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""" ) UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""" ) UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) for i in range(snake_case_ ): UpperCAmelCase_ = num_up_blocks - 1 - i UpperCAmelCase_ = [ key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) return new_checkpoint def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict: '''simple docstring''' UpperCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ = io.BytesIO(r.content ) UpperCAmelCase_ = OmegaConf.load(snake_case_ ) UpperCAmelCase_ = 5_12 UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ = {} with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ = f.get_tensor(snake_case_ ) else: UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"] # Convert the VAE model. UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ ) UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ ) UpperCAmelCase_ = AutoencoderKL(**snake_case_ ) vae.load_state_dict(snake_case_ ) vae.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') SCREAMING_SNAKE_CASE_: str =parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
1
0
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin A : int = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class A ( UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' A__ = XLNetTokenizer A__ = XLNetTokenizerFast A__ = True A__ = True def lowerCamelCase__ (self : Union[str, Any] ) -> Dict: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase__ = XLNetTokenizer(__a , keep_accents=__a ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ (self : Dict ) -> int: """simple docstring""" lowercase__ = """<s>""" lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def lowerCamelCase__ (self : Optional[Any] ) -> Dict: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<unk>""" ) self.assertEqual(vocab_keys[1] , """<s>""" ) self.assertEqual(vocab_keys[-1] , """<eod>""" ) self.assertEqual(len(__a ) , 1006 ) def lowerCamelCase__ (self : Tuple ) -> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def lowerCamelCase__ (self : Tuple ) -> int: """simple docstring""" lowercase__ = XLNetTokenizer(__a , keep_accents=__a ) lowercase__ = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [285, 46, 10, 170, 382] ) lowercase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowercase__ = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) lowercase__ = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCamelCase__ (self : Any ) -> Any: """simple docstring""" lowercase__ = XLNetTokenizer(__a , do_lower_case=__a ) lowercase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """""", """i""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] ) def lowerCamelCase__ (self : int ) -> Dict: """simple docstring""" lowercase__ = XLNetTokenizer(__a , do_lower_case=__a ) lowercase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """se""", """.""", ] , ) @slow def lowerCamelCase__ (self : str ) -> Optional[Any]: """simple docstring""" lowercase__ = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" ) lowercase__ = tokenizer.encode("""sequence builders""" , add_special_tokens=__a ) lowercase__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__a ) lowercase__ = tokenizer.build_inputs_with_special_tokens(__a ) lowercase__ = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def lowerCamelCase__ (self : Optional[int] ) -> List[str]: """simple docstring""" lowercase__ = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
305
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __A ( unittest.TestCase ): def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 1 def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase (self : Dict , __a : Any , __a : List[Any] ): UpperCAmelCase_ = FlaxViTModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (self.image_size, self.image_size) UpperCAmelCase_ = (self.patch_size, self.patch_size) UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase (self : Tuple , __a : str , __a : Any ): UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = FlaxViTForImageClassification(config=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = FlaxViTForImageClassification(__a ) UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(__a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase (self : Any ): UpperCAmelCase_ = FlaxViTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(__a , __a ) UpperCAmelCase_ = model_class(__a ) @jax.jit def model_jitted(__a : Tuple , **__a : List[Any] ): return model(pixel_values=__a , **__a ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase (self : Tuple ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__a )
1
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase : Tuple = { 'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'], 'configuration_maskformer_swin': ['MaskFormerSwinConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : int = ['MaskFormerFeatureExtractor'] lowerCAmelCase : Any = ['MaskFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase : List[str] = [ 'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'MaskFormerForInstanceSegmentation', 'MaskFormerModel', 'MaskFormerPreTrainedModel', ] lowerCAmelCase : Dict = [ 'MaskFormerSwinBackbone', 'MaskFormerSwinModel', 'MaskFormerSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
291
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 5 # Realm tok UpperCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(__a , exist_ok=__a ) def _lowercase (self : Optional[Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def _lowercase (self : Any ): shutil.rmtree(self.tmpdirname ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase (self : List[str] ): UpperCAmelCase_ = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase (self : Any ): UpperCAmelCase_ = np.array( [ B"This is the first record", B"This is the second record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=__a , ) return block_records def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _lowercase (self : int ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual([False, True, True] , __a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: UpperCAmelCase_ = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
1
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { 'configuration_x_clip': [ 'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XCLIPConfig', 'XCLIPTextConfig', 'XCLIPVisionConfig', ], 'processing_x_clip': ['XCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K) def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float: '''simple docstring''' if donor_conc <= 0: raise ValueError("Donor concentration should be positive" ) elif acceptor_conc <= 0: raise ValueError("Acceptor concentration should be positive" ) elif intrinsic_conc <= 0: raise ValueError("Intrinsic concentration should be positive" ) elif donor_conc <= intrinsic_conc: raise ValueError( "Donor concentration should be greater than intrinsic concentration" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( "Acceptor concentration should be greater than intrinsic concentration" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class A__ ( datasets.BuilderConfig ): """simple docstring""" UpperCamelCase_ : Optional[datasets.Features] = None class A__ ( datasets.ArrowBasedBuilder ): """simple docstring""" UpperCamelCase_ : str = PandasConfig def _lowerCAmelCase ( self : Any ) -> List[Any]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : List[str] ) -> Optional[int]: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _UpperCAmelCase : List[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__a , (str, list, tuple) ): _UpperCAmelCase : int = data_files if isinstance(__a , __a ): _UpperCAmelCase : Optional[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _UpperCAmelCase : int = [dl_manager.iter_files(__a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] _UpperCAmelCase : List[str] = [] for split_name, files in data_files.items(): if isinstance(__a , __a ): _UpperCAmelCase : Dict = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _UpperCAmelCase : Dict = [dl_manager.iter_files(__a ) for file in files] splits.append(datasets.SplitGenerator(name=__a , gen_kwargs={"files": files} ) ) return splits def _lowerCAmelCase ( self : str , lowerCAmelCase__ : pa.Table ) -> Optional[Any]: """simple docstring""" if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _UpperCAmelCase : Optional[Any] = table_cast(__a , self.config.features.arrow_schema ) return pa_table def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]: """simple docstring""" for i, file in enumerate(itertools.chain.from_iterable(__a ) ): with open(__a , "rb" ) as f: _UpperCAmelCase : Optional[int] = pa.Table.from_pandas(pd.read_pickle(__a ) ) yield i, self._cast_table(__a )
145
'''simple docstring''' import math def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase_ = input("Enter message: " ) UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) ) UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " ) if mode.lower().startswith("e" ): UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ ) elif mode.lower().startswith("d" ): UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"""Output:\n{text + "|"}""" ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = [""] * key for col in range(snake_case_ ): UpperCAmelCase_ = col while pointer < len(snake_case_ ): cipher_text[col] += message[pointer] pointer += key return "".join(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key ) UpperCAmelCase_ = key UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ ) UpperCAmelCase_ = [""] * num_cols UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase_ = 0 row += 1 return "".join(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
1
0
'''simple docstring''' import numpy as np lowerCamelCase : Dict = [ ['a', 'b', 'c', 'd', 'e'], ['f', 'g', 'h', 'i', 'k'], ['l', 'm', 'n', 'o', 'p'], ['q', 'r', 's', 't', 'u'], ['v', 'w', 'x', 'y', 'z'], ] class A__ : def __init__( self : Tuple ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =np.array(__a ) def A ( self : Union[str, Any] , _a : str ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.where(letter == self.SQUARE ) _SCREAMING_SNAKE_CASE =np.concatenate([indexa + 1, indexa + 1] ) return indexes def A ( self : str , _a : int , _a : int ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.SQUARE[indexa - 1, indexa - 1] return letter def A ( self : Optional[Any] , _a : str ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =message.lower() _SCREAMING_SNAKE_CASE =message.replace(' ' , '' ) _SCREAMING_SNAKE_CASE =message.replace('j' , 'i' ) _SCREAMING_SNAKE_CASE =np.empty((2, len(__a )) ) for letter_index in range(len(__a ) ): _SCREAMING_SNAKE_CASE =self.letter_to_numbers(message[letter_index] ) _SCREAMING_SNAKE_CASE =numbers[0] _SCREAMING_SNAKE_CASE =numbers[1] _SCREAMING_SNAKE_CASE =first_step.reshape(2 * len(__a ) ) _SCREAMING_SNAKE_CASE ='' for numbers_index in range(len(__a ) ): _SCREAMING_SNAKE_CASE =int(second_step[numbers_index * 2] ) _SCREAMING_SNAKE_CASE =int(second_step[(numbers_index * 2) + 1] ) _SCREAMING_SNAKE_CASE =self.numbers_to_letter(__a , __a ) _SCREAMING_SNAKE_CASE =encoded_message + letter return encoded_message def A ( self : Dict , _a : str ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =message.lower() message.replace(' ' , '' ) _SCREAMING_SNAKE_CASE =np.empty(2 * len(__a ) ) for letter_index in range(len(__a ) ): _SCREAMING_SNAKE_CASE =self.letter_to_numbers(message[letter_index] ) _SCREAMING_SNAKE_CASE =numbers[0] _SCREAMING_SNAKE_CASE =numbers[1] _SCREAMING_SNAKE_CASE =first_step.reshape((2, len(__a )) ) _SCREAMING_SNAKE_CASE ='' for numbers_index in range(len(__a ) ): _SCREAMING_SNAKE_CASE =int(second_step[0, numbers_index] ) _SCREAMING_SNAKE_CASE =int(second_step[1, numbers_index] ) _SCREAMING_SNAKE_CASE =self.numbers_to_letter(__a , __a ) _SCREAMING_SNAKE_CASE =decoded_message + letter return decoded_message
47
'''simple docstring''' import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger() SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] , __a : str ): os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = {"source": "What is love ?", "target": "life"} UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f: f.write(__a ) def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = os.path.join(__a , "output" ) UpperCAmelCase_ = os.path.join(__a , "data" ) self._create_dummy_data(data_dir=__a ) UpperCAmelCase_ = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__a , env=self.get_env() ) UpperCAmelCase_ = os.path.join(__a , "metrics.json" ) with open(__a ) as f: UpperCAmelCase_ = json.load(__a ) return result @require_torch_gpu def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def _lowercase (self : Dict ): UpperCAmelCase_ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def _lowercase (self : Any ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
1
0
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCamelCase : Optional[Any] = logging.get_logger(__name__) __lowerCamelCase : List[Any] = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class A__ ( UpperCamelCase__ , UpperCamelCase__ ): _UpperCAmelCase :int = """convnextv2""" def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ): '''simple docstring''' super().__init__(**__a ) UpperCamelCase : Tuple = num_channels UpperCamelCase : List[str] = patch_size UpperCamelCase : List[Any] = num_stages UpperCamelCase : str = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes UpperCamelCase : Optional[int] = [3, 3, 9, 3] if depths is None else depths UpperCamelCase : Any = hidden_act UpperCamelCase : List[str] = initializer_range UpperCamelCase : Optional[int] = layer_norm_eps UpperCamelCase : List[str] = drop_path_rate UpperCamelCase : int = image_size UpperCamelCase : Optional[Any] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] UpperCamelCase , UpperCamelCase : Union[str, Any] = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names )
52
'''simple docstring''' from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time SCREAMING_SNAKE_CASE_: Optional[int] =Lock() def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(snake_case_ ) process_lock.release() # receive your right neighbor's value process_lock.acquire() UpperCAmelCase_ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left UpperCAmelCase_ = min(snake_case_ , snake_case_ ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(snake_case_ ) process_lock.release() # receive your left neighbor's value process_lock.acquire() UpperCAmelCase_ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right UpperCAmelCase_ = max(snake_case_ , snake_case_ ) # after all swaps are performed, send the values back to main result_pipe[1].send(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr for i in range(1 , len(snake_case_ ) - 1 ): UpperCAmelCase_ = Pipe() UpperCAmelCase_ = Pipe() process_array_.append( Process( target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) UpperCAmelCase_ = temp_rs UpperCAmelCase_ = temp_rr process_array_.append( Process( target=snake_case_ , args=( len(snake_case_ ) - 1, arr[len(snake_case_ ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(snake_case_ ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(snake_case_ ) ): UpperCAmelCase_ = result_pipe[p][0].recv() process_array_[p].join() return arr def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = list(range(10 , 0 , -1 ) ) print("Initial List" ) print(*snake_case_ ) UpperCAmelCase_ = odd_even_transposition(snake_case_ ) print("Sorted List\n" ) print(*snake_case_ ) if __name__ == "__main__": main()
1
0
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset SCREAMING_SNAKE_CASE__ = pd.read_csv( "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/" "position_salaries.csv" ) SCREAMING_SNAKE_CASE__ = dataset.iloc[:, 1:2].values SCREAMING_SNAKE_CASE__ = dataset.iloc[:, 2].values SCREAMING_SNAKE_CASE__ = train_test_split(X, y, test_size=0.2, random_state=0) SCREAMING_SNAKE_CASE__ = PolynomialFeatures(degree=4) SCREAMING_SNAKE_CASE__ = poly_reg.fit_transform(X) SCREAMING_SNAKE_CASE__ = LinearRegression() pol_reg.fit(X_poly, y) def lowerCAmelCase__ ( ) -> Tuple: """simple docstring""" plt.scatter(snake_case_ , snake_case_ , color='red' ) plt.plot(snake_case_ , pol_reg.predict(poly_reg.fit_transform(snake_case_ ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
150
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> str: '''simple docstring''' if a < 0 or b < 0: raise ValueError("the value of both inputs must be positive" ) UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] # remove the leading "0b" UpperCAmelCase_ = str(bin(snake_case_ ) )[2:] UpperCAmelCase_ = max(len(snake_case_ ) , len(snake_case_ ) ) return "0b" + "".join( str(int("1" in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(snake_case_ ) , b_binary.zfill(snake_case_ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
"""simple docstring""" def _snake_case ( lowercase__ ): return 10 - x * x def _snake_case ( lowercase__ , lowercase__ ): if equation(snake_case_ ) * equation(snake_case_ ) >= 0: raise ValueError('Wrong space!' ) _lowerCamelCase : Union[str, Any] = a while (b - a) >= 0.0_1: # Find middle point _lowerCamelCase : Optional[int] = (a + b) / 2 # Check if middle point is root if equation(snake_case_ ) == 0.0: break # Decide the side to repeat the steps if equation(snake_case_ ) * equation(snake_case_ ) < 0: _lowerCamelCase : Dict = c else: _lowerCamelCase : Union[str, Any] = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
96
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ) -> None: '''simple docstring''' if start is None: UpperCAmelCase_ = 0 if end is None: UpperCAmelCase_ = len(snake_case_ ) - 1 if start >= end: return UpperCAmelCase_ = (start + end) // 2 slowsort(snake_case_ , snake_case_ , snake_case_ ) slowsort(snake_case_ , mid + 1 , snake_case_ ) if sequence[end] < sequence[mid]: UpperCAmelCase_ , UpperCAmelCase_ = sequence[mid], sequence[end] slowsort(snake_case_ , snake_case_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
1
0
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class __snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self , __lowerCamelCase , __lowerCamelCase=13 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=99 , __lowerCamelCase=32 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=37 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=512 , __lowerCamelCase=16 , __lowerCamelCase=2 , __lowerCamelCase=0.0_2 , __lowerCamelCase=4 , ): '''simple docstring''' __A : int = parent __A : Dict = batch_size __A : Optional[int] = seq_length __A : List[str] = is_training __A : int = use_attention_mask __A : Optional[int] = use_token_type_ids __A : Any = use_labels __A : Dict = vocab_size __A : str = hidden_size __A : int = num_hidden_layers __A : List[Any] = num_attention_heads __A : Optional[int] = intermediate_size __A : Any = hidden_act __A : Any = hidden_dropout_prob __A : Dict = attention_probs_dropout_prob __A : Union[str, Any] = max_position_embeddings __A : Dict = type_vocab_size __A : Union[str, Any] = type_sequence_label_size __A : List[Any] = initializer_range __A : str = num_choices def UpperCamelCase__( self ): '''simple docstring''' __A : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __A : Optional[Any] = None if self.use_attention_mask: __A : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) __A : Any = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__a , ) return config, input_ids, attention_mask def UpperCamelCase__( self ): '''simple docstring''' __A : Optional[Any] = self.prepare_config_and_inputs() __A , __A , __A : Optional[Any] = config_and_inputs __A : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __snake_case ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def UpperCamelCase__( self ): '''simple docstring''' __A : Union[str, Any] = FlaxDistilBertModelTester(self ) @slow def UpperCamelCase__( self ): '''simple docstring''' for model_class_name in self.all_model_classes: __A : Optional[int] = model_class_name.from_pretrained('''distilbert-base-uncased''' ) __A : Any = model(np.ones((1, 1) ) ) self.assertIsNotNone(__a ) @require_flax class __snake_case ( unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase__( self ): '''simple docstring''' __A : Tuple = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __A : int = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __A : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __A : str = model(__a , attention_mask=__a )[0] __A : int = (1, 11, 768) self.assertEqual(output.shape , __a ) __A : Any = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
179
'''simple docstring''' from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class __A ( UpperCamelCase__ ): a__ : Optional[Any] = DistilBertTokenizer a__ : Any = DistilBertTokenizerFast a__ : str = True @slow def _lowercase (self : int ): UpperCAmelCase_ = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) UpperCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a ) UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
1
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase : Dict = {'vocab_file': 'spm_char.model'} _lowerCAmelCase : int = { 'vocab_file': { 'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model', 'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model', 'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model', } } _lowerCAmelCase : Any = { 'microsoft/speecht5_asr': 1_024, 'microsoft/speecht5_tts': 1_024, 'microsoft/speecht5_vc': 1_024, } class __magic_name__ ( UpperCamelCase__ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self :int , snake_case :List[str] , snake_case :Optional[Any]="<s>" , snake_case :Optional[Any]="</s>" , snake_case :Tuple="<unk>" , snake_case :Optional[Any]="<pad>" , snake_case :Optional[Dict[str, Any]] = None , **snake_case :Tuple , ): '''simple docstring''' A_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) A_ : List[str] = vocab_file A_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__a ) @property def SCREAMING_SNAKE_CASE ( self :Dict ): '''simple docstring''' return self.sp_model.get_piece_size() def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : Any = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self :Optional[Any] ): '''simple docstring''' A_ : Any = self.__dict__.copy() A_ : str = None return state def __setstate__( self :int , snake_case :Any ): '''simple docstring''' A_ : Tuple = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): A_ : List[str] = {} A_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :str ): '''simple docstring''' return self.sp_model.encode(__a , out_type=__a ) def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :Union[str, Any] ): '''simple docstring''' return self.sp_model.piece_to_id(__a ) def SCREAMING_SNAKE_CASE ( self :int , snake_case :Any ): '''simple docstring''' A_ : Any = self.sp_model.IdToPiece(__a ) return token def SCREAMING_SNAKE_CASE ( self :Any , snake_case :List[Any] ): '''simple docstring''' A_ : Any = [] A_ : Any = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__a ) + token A_ : List[str] = [] else: current_sub_tokens.append(__a ) out_string += self.sp_model.decode(__a ) return out_string.strip() def SCREAMING_SNAKE_CASE ( self :str , snake_case :Union[str, Any] , snake_case :List[Any]=None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :List[int] , snake_case :Optional[List[int]] = None , snake_case :bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) A_ : Union[str, Any] = [1] if token_ids_a is None: return ([0] * len(__a )) + suffix_ones return ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :str , snake_case :Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__a ): logger.error(f"Vocabulary path ({save_directory}) should be a directory" ) return A_ : Optional[Any] = os.path.join( __a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __a ) elif not os.path.isfile(self.vocab_file ): with open(__a , "wb" ) as fi: A_ : List[Any] = self.sp_model.serialized_model_proto() fi.write(__a ) return (out_vocab_file,)
300
'''simple docstring''' import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) SCREAMING_SNAKE_CASE_: Tuple =[] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", f"decoder.layers.{i}.encoder_attn.out_proj.weight", ) ) rename_keys.append( ( f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", f"decoder.layers.{i}.encoder_attn.out_proj.bias", ) ) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias") ) rename_keys.append( (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias") ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias") ) rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias")) rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias")) rename_keys.append( (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'), ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'), ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'), ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'), ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'), ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'), ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'), ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'), ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'), ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'), ] ) def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[int] ) -> Dict: '''simple docstring''' UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val def lowerCAmelCase_ ( snake_case_ : int ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCAmelCase_ = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) UpperCAmelCase_ = value else: UpperCAmelCase_ = value return new_state_dict def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Dict=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = "" if is_panoptic: UpperCAmelCase_ = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCAmelCase_ = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCAmelCase_ = in_proj_weight[:2_56, :] UpperCAmelCase_ = in_proj_bias[:2_56] UpperCAmelCase_ = in_proj_weight[2_56:5_12, :] UpperCAmelCase_ = in_proj_bias[2_56:5_12] UpperCAmelCase_ = in_proj_weight[-2_56:, :] UpperCAmelCase_ = in_proj_bias[-2_56:] def lowerCAmelCase_ ( ) -> Dict: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCAmelCase_ = "resnet101" if "dc5" in model_name: UpperCAmelCase_ = True UpperCAmelCase_ = "panoptic" in model_name if is_panoptic: UpperCAmelCase_ = 2_50 else: UpperCAmelCase_ = 91 UpperCAmelCase_ = "huggingface/label-files" UpperCAmelCase_ = "coco-detection-id2label.json" UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()} UpperCAmelCase_ = idalabel UpperCAmelCase_ = {v: k for k, v in idalabel.items()} # load image processor UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection" UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ ) # prepare image UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" ) UpperCAmelCase_ = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval() UpperCAmelCase_ = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCAmelCase_ = "conditional_detr." + src rename_key(snake_case_ , snake_case_ , snake_case_ ) UpperCAmelCase_ = rename_backbone_keys(snake_case_ ) # query, key and value matrices need special treatment read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): UpperCAmelCase_ = state_dict.pop(snake_case_ ) UpperCAmelCase_ = val # finally, create HuggingFace model and load state dict UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ ) model.load_state_dict(snake_case_ ) model.eval() model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion UpperCAmelCase_ = conditional_detr(snake_case_ ) UpperCAmelCase_ = model(snake_case_ ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser() parser.add_argument( '--model_name', default='conditional_detr_resnet50', type=str, help='Name of the CONDITIONAL_DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) SCREAMING_SNAKE_CASE_: int =parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
1
0
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class __magic_name__ ( UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, unittest.TestCase): UpperCamelCase__ = StableDiffusionControlNetImgaImgPipeline UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''}) UpperCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def SCREAMING_SNAKE_CASE_ ( self : List[str] ): torch.manual_seed(0 ) lowercase_ : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) lowercase_ : str = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) torch.manual_seed(0 ) lowercase_ : Tuple = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowercase_ : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase_ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase_ : str = CLIPTextModel(__a ) lowercase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ : Union[str, Any] = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[Any]=0 ): if str(__a ).startswith("""mps""" ): lowercase_ : Any = torch.manual_seed(__a ) else: lowercase_ : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) lowercase_ : Union[str, Any] = 2 lowercase_ : List[Any] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ) lowercase_ : Dict = floats_tensor(control_image.shape , rng=random.Random(__a ) ).to(__a ) lowercase_ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase_ : Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) lowercase_ : List[str] = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class __magic_name__ ( UpperCamelCase__, UpperCamelCase__, unittest.TestCase): UpperCamelCase__ = StableDiffusionControlNetImgaImgPipeline UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""} UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase__ = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def SCREAMING_SNAKE_CASE_ ( self : Dict ): torch.manual_seed(0 ) lowercase_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) torch.manual_seed(0 ) def init_weights(lowercase_ : Union[str, Any] ): if isinstance(__a , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowercase_ : Optional[int] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__a ) torch.manual_seed(0 ) lowercase_ : Optional[int] = ControlNetModel( block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , ) controlneta.controlnet_down_blocks.apply(__a ) torch.manual_seed(0 ) lowercase_ : Optional[Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowercase_ : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) lowercase_ : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowercase_ : Dict = CLIPTextModel(__a ) lowercase_ : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowercase_ : Tuple = MultiControlNetModel([controlneta, controlneta] ) lowercase_ : Dict = { """unet""": unet, """controlnet""": controlnet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : int , lowercase_ : str=0 ): if str(__a ).startswith("""mps""" ): lowercase_ : Dict = torch.manual_seed(__a ) else: lowercase_ : Any = torch.Generator(device=__a ).manual_seed(__a ) lowercase_ : Optional[Any] = 2 lowercase_ : List[Any] = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__a , device=torch.device(__a ) , ), ] lowercase_ : Any = floats_tensor(control_image[0].shape , rng=random.Random(__a ) ).to(__a ) lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowercase_ : Dict = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) ) lowercase_ : Any = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", """image""": image, """control_image""": control_image, } return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): lowercase_ : List[str] = self.get_dummy_components() lowercase_ : Union[str, Any] = self.pipeline_class(**__a ) pipe.to(__a ) lowercase_ : List[Any] = 10.0 lowercase_ : List[Any] = 4 lowercase_ : int = self.get_dummy_inputs(__a ) lowercase_ : Optional[int] = steps lowercase_ : str = scale lowercase_ : Any = pipe(**__a )[0] lowercase_ : int = self.get_dummy_inputs(__a ) lowercase_ : Optional[int] = steps lowercase_ : List[str] = scale lowercase_ : Union[str, Any] = pipe(**__a , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowercase_ : Tuple = self.get_dummy_inputs(__a ) lowercase_ : List[Any] = steps lowercase_ : Tuple = scale lowercase_ : str = pipe(**__a , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowercase_ : int = self.get_dummy_inputs(__a ) lowercase_ : int = steps lowercase_ : Union[str, Any] = scale lowercase_ : Optional[int] = pipe(**__a , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def SCREAMING_SNAKE_CASE_ ( self : Any ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def SCREAMING_SNAKE_CASE_ ( self : str ): lowercase_ : Any = self.get_dummy_components() lowercase_ : str = self.pipeline_class(**__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(__a ) except NotImplementedError: pass @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): lowercase_ : Tuple = ControlNetModel.from_pretrained("""lllyasviel/sd-controlnet-canny""" ) lowercase_ : str = StableDiffusionControlNetImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , safety_checker=__a , controlnet=__a ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=__a ) lowercase_ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase_ : str = """evil space-punk bird""" lowercase_ : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ).resize((512, 512) ) lowercase_ : Optional[int] = load_image( """https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png""" ).resize((512, 512) ) lowercase_ : Optional[int] = pipe( __a , __a , control_image=__a , generator=__a , output_type="""np""" , num_inference_steps=50 , strength=0.6 , ) lowercase_ : List[str] = output.images[0] assert image.shape == (512, 512, 3) lowercase_ : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy""" ) assert np.abs(expected_image - image ).max() < 9E-2
239
'''simple docstring''' import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.get_logger(__name__) class __A ( UpperCamelCase__ ): def __init__(self : int , *__a : Dict , **__a : str ): warnings.warn( "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use CLIPImageProcessor instead." , __a , ) super().__init__(*__a , **__a )
1
0
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( 'The `image_to_image.py` script is outdated. Please use directly `from diffusers import' ' StableDiffusionImg2ImgPipeline` instead.' )
305
'''simple docstring''' from __future__ import annotations import queue class __A : def __init__(self : Optional[Any] , __a : str ): UpperCAmelCase_ = data UpperCAmelCase_ = None UpperCAmelCase_ = None def lowerCAmelCase_ ( ) -> TreeNode: '''simple docstring''' print("\n********Press N to stop entering at any point of time********\n" ) UpperCAmelCase_ = input("Enter the value of the root node: " ).strip().lower() UpperCAmelCase_ = queue.Queue() UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() UpperCAmelCase_ = f"""Enter the left node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = left_node q.put(snake_case_ ) UpperCAmelCase_ = f"""Enter the right node of {node_found.data}: """ UpperCAmelCase_ = input(snake_case_ ).strip().lower() or "n" if check == "n": return tree_node UpperCAmelCase_ = TreeNode(int(snake_case_ ) ) UpperCAmelCase_ = right_node q.put(snake_case_ ) raise def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return print(node.data , end="," ) pre_order(node.left ) pre_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return in_order(node.left ) print(node.data , end="," ) in_order(node.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end="," ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = queue.Queue() q.put(snake_case_ ) while not q.empty(): UpperCAmelCase_ = [] while not q.empty(): UpperCAmelCase_ = q.get() print(node_dequeued.data , end="," ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: # start from root node, find its left child print(n.data , end="," ) stack.append(snake_case_ ) UpperCAmelCase_ = n.left # end of while means current node doesn't have left child UpperCAmelCase_ = stack.pop() # start to traverse its right child UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ = [] UpperCAmelCase_ = node while n or stack: while n: stack.append(snake_case_ ) UpperCAmelCase_ = n.left UpperCAmelCase_ = stack.pop() print(n.data , end="," ) UpperCAmelCase_ = n.right def lowerCAmelCase_ ( snake_case_ : TreeNode ) -> None: '''simple docstring''' if not isinstance(snake_case_ , snake_case_ ) or not node: return UpperCAmelCase_ , UpperCAmelCase_ = [], [] UpperCAmelCase_ = node stacka.append(snake_case_ ) while stacka: # to find the reversed order of post order, store it in stack2 UpperCAmelCase_ = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(snake_case_ ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end="," ) def lowerCAmelCase_ ( snake_case_ : str = "" , snake_case_ : Any=50 , snake_case_ : Union[str, Any]="*" ) -> str: '''simple docstring''' if not s: return "\n" + width * char UpperCAmelCase_ , UpperCAmelCase_ = divmod(width - len(snake_case_ ) - 2 , 2 ) return f"""{left * char} {s} {(left + extra) * char}""" if __name__ == "__main__": import doctest doctest.testmod() print(prompt('Binary Tree Traversals')) SCREAMING_SNAKE_CASE_: TreeNode =build_tree() print(prompt('Pre Order Traversal')) pre_order(node) print(prompt() + '\n') print(prompt('In Order Traversal')) in_order(node) print(prompt() + '\n') print(prompt('Post Order Traversal')) post_order(node) print(prompt() + '\n') print(prompt('Level Order Traversal')) level_order(node) print(prompt() + '\n') print(prompt('Actual Level Order Traversal')) level_order_actual(node) print('*' * 50 + '\n') print(prompt('Pre Order Traversal - Iteration Version')) pre_order_iter(node) print(prompt() + '\n') print(prompt('In Order Traversal - Iteration Version')) in_order_iter(node) print(prompt() + '\n') print(prompt('Post Order Traversal - Iteration Version')) post_order_iter(node) print(prompt())
1
0
"""simple docstring""" import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class __magic_name__ ( UpperCamelCase__ ): '''simple docstring''' __UpperCamelCase = (UnCLIPScheduler,) def _lowerCAmelCase ( self , **_a ): """simple docstring""" lowerCamelCase = { """num_train_timesteps""": 1_000, """variance_type""": """fixed_small_log""", """clip_sample""": True, """clip_sample_range""": 1.0, """prediction_type""": """epsilon""", } config.update(**__a ) return config def _lowerCAmelCase ( self ): """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__a ) def _lowerCAmelCase ( self ): """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=__a ) def _lowerCAmelCase ( self ): """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__a ) def _lowerCAmelCase ( self ): """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=__a ) def _lowerCAmelCase ( self ): """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=__a ) def _lowerCAmelCase ( self ): """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=__a , prev_timestep=__a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.scheduler_classes[0] lowerCamelCase = self.get_scheduler_config(variance_type="""fixed_small_log""" ) lowerCamelCase = scheduler_class(**__a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.scheduler_classes[0] lowerCamelCase = self.get_scheduler_config(variance_type="""learned_range""" ) lowerCamelCase = scheduler_class(**__a ) lowerCamelCase = 0.5 assert scheduler._get_variance(1 , predicted_variance=__a ) - -10.1_712_790 < 1e-5 assert scheduler._get_variance(487 , predicted_variance=__a ) - -5.7_998_052 < 1e-5 assert scheduler._get_variance(999 , predicted_variance=__a ) - -0.0_010_011 < 1e-5 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.scheduler_classes[0] lowerCamelCase = self.get_scheduler_config() lowerCamelCase = scheduler_class(**__a ) lowerCamelCase = scheduler.timesteps lowerCamelCase = self.dummy_model() lowerCamelCase = self.dummy_sample_deter lowerCamelCase = torch.manual_seed(0 ) for i, t in enumerate(__a ): # 1. predict noise residual lowerCamelCase = model(__a , __a ) # 2. predict previous mean of sample x_t-1 lowerCamelCase = scheduler.step(__a , __a , __a , generator=__a ).prev_sample lowerCamelCase = pred_prev_sample lowerCamelCase = torch.sum(torch.abs(__a ) ) lowerCamelCase = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2 assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.scheduler_classes[0] lowerCamelCase = self.get_scheduler_config() lowerCamelCase = scheduler_class(**__a ) scheduler.set_timesteps(25 ) lowerCamelCase = scheduler.timesteps lowerCamelCase = self.dummy_model() lowerCamelCase = self.dummy_sample_deter lowerCamelCase = torch.manual_seed(0 ) for i, t in enumerate(__a ): # 1. predict noise residual lowerCamelCase = model(__a , __a ) if i + 1 == timesteps.shape[0]: lowerCamelCase = None else: lowerCamelCase = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 lowerCamelCase = scheduler.step( __a , __a , __a , prev_timestep=__a , generator=__a ).prev_sample lowerCamelCase = pred_prev_sample lowerCamelCase = torch.sum(torch.abs(__a ) ) lowerCamelCase = torch.mean(torch.abs(__a ) ) assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2 assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3 def _lowerCAmelCase ( self ): """simple docstring""" pass def _lowerCAmelCase ( self ): """simple docstring""" pass
291
'''simple docstring''' from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__) @add_end_docstrings( UpperCamelCase__ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class __A ( UpperCamelCase__ ): def _lowercase (self : str , __a : GenericTensor ): if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ) else: raise ValueError("Unsupported framework" ) return masked_index def _lowercase (self : Tuple , __a : GenericTensor ): UpperCAmelCase_ = self.get_masked_index(__a ) UpperCAmelCase_ = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , f"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def _lowercase (self : List[Any] , __a : GenericTensor ): if isinstance(__a , __a ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(__a ) def _lowercase (self : Tuple , __a : Dict , __a : List[str]=None , **__a : Any ): if return_tensors is None: UpperCAmelCase_ = self.framework UpperCAmelCase_ = self.tokenizer(__a , return_tensors=__a ) self.ensure_exactly_one_mask_token(__a ) return model_inputs def _lowercase (self : str , __a : Optional[int] ): UpperCAmelCase_ = self.model(**__a ) UpperCAmelCase_ = model_inputs["input_ids"] return model_outputs def _lowercase (self : List[str] , __a : Tuple , __a : int=5 , __a : Dict=None ): # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: UpperCAmelCase_ = target_ids.shape[0] UpperCAmelCase_ = model_outputs["input_ids"][0] UpperCAmelCase_ = model_outputs["logits"] if self.framework == "tf": UpperCAmelCase_ = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] UpperCAmelCase_ = outputs.numpy() UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = stable_softmax(__a , axis=-1 ) if target_ids is not None: UpperCAmelCase_ = tf.gather_nd(tf.squeeze(__a , 0 ) , target_ids.reshape(-1 , 1 ) ) UpperCAmelCase_ = tf.expand_dims(__a , 0 ) UpperCAmelCase_ = tf.math.top_k(__a , k=__a ) UpperCAmelCase_ , UpperCAmelCase_ = topk.values.numpy(), topk.indices.numpy() else: UpperCAmelCase_ = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__a ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample UpperCAmelCase_ = outputs[0, masked_index, :] UpperCAmelCase_ = logits.softmax(dim=-1 ) if target_ids is not None: UpperCAmelCase_ = probs[..., target_ids] UpperCAmelCase_ , UpperCAmelCase_ = probs.topk(__a ) UpperCAmelCase_ = [] UpperCAmelCase_ = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): UpperCAmelCase_ = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place UpperCAmelCase_ = input_ids.numpy().copy() if target_ids is not None: UpperCAmelCase_ = target_ids[p].tolist() UpperCAmelCase_ = p # Filter padding out: UpperCAmelCase_ = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back UpperCAmelCase_ = self.tokenizer.decode(__a , skip_special_tokens=__a ) UpperCAmelCase_ = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(__a ) result.append(__a ) if single_mask: return result[0] return result def _lowercase (self : Dict , __a : List[Any] , __a : List[str]=None ): if isinstance(__a , __a ): UpperCAmelCase_ = [targets] try: UpperCAmelCase_ = self.tokenizer.get_vocab() except Exception: UpperCAmelCase_ = {} UpperCAmelCase_ = [] for target in targets: UpperCAmelCase_ = vocab.get(__a , __a ) if id_ is None: UpperCAmelCase_ = self.tokenizer( __a , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , max_length=1 , truncation=__a , )["input_ids"] if len(__a ) == 0: logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ "We cannot replace it with anything meaningful, ignoring it" ) continue UpperCAmelCase_ = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f"""The specified target token `{target}` does not exist in the model vocabulary. """ f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) UpperCAmelCase_ = list(set(__a ) ) if len(__a ) == 0: raise ValueError("At least one target must be provided when passed." ) UpperCAmelCase_ = np.array(__a ) return target_ids def _lowercase (self : Tuple , __a : Dict=None , __a : List[str]=None ): UpperCAmelCase_ = {} if targets is not None: UpperCAmelCase_ = self.get_target_ids(__a , __a ) UpperCAmelCase_ = target_ids if top_k is not None: UpperCAmelCase_ = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__(self : Union[str, Any] , __a : str , *__a : Any , **__a : Tuple ): UpperCAmelCase_ = super().__call__(__a , **__a ) if isinstance(__a , __a ) and len(__a ) == 1: return outputs[0] return outputs
1
0
from __future__ import annotations def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->list[str]: """simple docstring""" if partitions <= 0: raise ValueError('''partitions must be a positive number!''' ) if partitions > number_of_bytes: raise ValueError('''partitions can not > number_of_bytes!''' ) lowercase : Optional[int] = number_of_bytes // partitions lowercase : Tuple = [] for i in range(snake_case_ ): lowercase : Dict = i * bytes_per_partition + 1 lowercase : int = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f"""{start_bytes}-{end_bytes}""" ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
337
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__) @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : str a__ : str a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : List[int] a__ : Optional[List[int]] = None a__ : Optional[List[int]] = None a__ : Optional[Union[int, float]] = None a__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __A ( UpperCamelCase__ ): a__ : List[InputFeatures] def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = os.path.join( __a , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , ) UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCAmelCase_ = cached_features_file + ".lock" with FileLock(__a ): if os.path.exists(__a ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCAmelCase_ = torch.load(__a ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCAmelCase_ = ( processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) ) logger.info("Training examples: %s" , len(__a ) ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) logger.info("Saving features into cached file %s" , __a ) torch.save(self.features , __a ) def __len__(self : List[Any] ): return len(self.features ) def __getitem__(self : Any , __a : Optional[Any] ): return self.features[i] def _lowercase (self : Union[str, Any] ): return self.label_list if is_tf_available(): import tensorflow as tf class __A : a__ : List[InputFeatures] def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCAmelCase_ = tf.data.Dataset.from_generator( __a , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowercase (self : int ): return self.dataset def __len__(self : Any ): return len(self.features ) def __getitem__(self : int , __a : Union[str, Any] ): return self.features[i] def _lowercase (self : int ): return self.label_list class __A ( UpperCamelCase__ ): def _lowercase (self : List[Any] , __a : Dict ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" ) def _lowercase (self : Any , __a : List[Any] ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" ) def _lowercase (self : Any ): return ["contradiction", "entailment", "neutral"] def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ): UpperCAmelCase_ = [] for i, line in enumerate(__a ): if i == 0: continue UpperCAmelCase_ = "%s-%s" % (set_type, line[0]) UpperCAmelCase_ = line[5] UpperCAmelCase_ = line[6] UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7] UpperCAmelCase_ = line[0] examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) ) return examples def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )} UpperCAmelCase_ = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d" % (ex_index) ) UpperCAmelCase_ = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0 UpperCAmelCase_ = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE_: int ={ 'hans': 3, } SCREAMING_SNAKE_CASE_: Any ={ 'hans': HansProcessor, }
1
0
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) __a = logging.getLogger(__name__) @dataclass(frozen=UpperCamelCase__ ) class A__ : """simple docstring""" UpperCamelCase_ : str UpperCamelCase_ : str UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None UpperCamelCase_ : Optional[str] = None @dataclass(frozen=UpperCamelCase__ ) class A__ : """simple docstring""" UpperCamelCase_ : List[int] UpperCamelCase_ : Optional[List[int]] = None UpperCamelCase_ : Optional[List[int]] = None UpperCamelCase_ : Optional[Union[int, float]] = None UpperCamelCase_ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class A__ ( UpperCamelCase__ ): """simple docstring""" UpperCamelCase_ : List[InputFeatures] def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : bool = False , ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[Any] = hans_processors[task]() _UpperCAmelCase : Any = os.path.join( __a , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , ) _UpperCAmelCase : Any = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) _UpperCAmelCase , _UpperCAmelCase : List[Any] = label_list[2], label_list[1] _UpperCAmelCase : Dict = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. _UpperCAmelCase : str = cached_features_file + ".lock" with FileLock(__a ): if os.path.exists(__a ) and not overwrite_cache: logger.info(F"""Loading features from cached file {cached_features_file}""" ) _UpperCAmelCase : Optional[int] = torch.load(__a ) else: logger.info(F"""Creating features from dataset file at {data_dir}""" ) _UpperCAmelCase : Dict = ( processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) ) logger.info("Training examples: %s" , len(__a ) ) _UpperCAmelCase : List[str] = hans_convert_examples_to_features(__a , __a , __a , __a ) logger.info("Saving features into cached file %s" , __a ) torch.save(self.features , __a ) def __len__( self : List[Any] ) -> int: """simple docstring""" return len(self.features ) def __getitem__( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]: """simple docstring""" return self.features[i] def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.label_list if is_tf_available(): import tensorflow as tf class A__ : """simple docstring""" UpperCamelCase_ : List[InputFeatures] def __init__( self : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = 1_2_8 , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : bool = False , ) -> Dict: """simple docstring""" _UpperCAmelCase : Optional[int] = hans_processors[task]() _UpperCAmelCase : Optional[Any] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) _UpperCAmelCase , _UpperCAmelCase : List[str] = label_list[2], label_list[1] _UpperCAmelCase : List[str] = label_list _UpperCAmelCase : Tuple = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) _UpperCAmelCase : Any = hans_convert_examples_to_features(__a , __a , __a , __a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 1_0_0_0_0 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) _UpperCAmelCase : Tuple = tf.data.Dataset.from_generator( __a , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowerCAmelCase ( self : int ) -> Optional[int]: """simple docstring""" return self.dataset def __len__( self : Any ) -> List[str]: """simple docstring""" return len(self.features ) def __getitem__( self : int , lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.features[i] def _lowerCAmelCase ( self : int ) -> Union[str, Any]: """simple docstring""" return self.label_list class A__ ( UpperCamelCase__ ): """simple docstring""" def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Dict ) -> List[str]: """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" ) def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : List[Any] ) -> List[str]: """simple docstring""" return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" ) def _lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" return ["contradiction", "entailment", "neutral"] def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Union[str, Any] ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = [] for i, line in enumerate(__a ): if i == 0: continue _UpperCAmelCase : Optional[int] = "%s-%s" % (set_type, line[0]) _UpperCAmelCase : Tuple = line[5] _UpperCAmelCase : int = line[6] _UpperCAmelCase : Any = line[7][2:] if line[7].startswith("ex" ) else line[7] _UpperCAmelCase : Optional[Any] = line[0] examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) ) return examples def __UpperCAmelCase ( a_: List[InputExample], a_: List[str], a_: int, a_: PreTrainedTokenizer, ): _UpperCAmelCase : List[Any] = {label: i for i, label in enumerate(snake_case_ )} _UpperCAmelCase : List[Any] = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ), desc="convert examples to features" ): if ex_index % 10_000 == 0: logger.info("Writing example %d" % (ex_index) ) _UpperCAmelCase : Dict = tokenizer( example.text_a, example.text_b, add_special_tokens=snake_case_, max_length=snake_case_, padding="max_length", truncation=snake_case_, return_overflowing_tokens=snake_case_, ) _UpperCAmelCase : Tuple = label_map[example.label] if example.label in label_map else 0 _UpperCAmelCase : int = int(example.pairID ) features.append(InputFeatures(**snake_case_, label=snake_case_, pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features __a = { 'hans': 3, } __a = { 'hans': HansProcessor, }
145
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_: Dict =logging.get_logger(__name__) SCREAMING_SNAKE_CASE_: Tuple ={} class __A ( UpperCamelCase__ ): a__ : int = """llama""" a__ : Any = ["""past_key_values"""] def __init__(self : List[str] , __a : List[str]=32000 , __a : Tuple=4096 , __a : List[Any]=11008 , __a : Dict=32 , __a : Tuple=32 , __a : Any=None , __a : Any="silu" , __a : List[Any]=2048 , __a : List[Any]=0.02 , __a : str=1E-6 , __a : Optional[Any]=True , __a : Union[str, Any]=0 , __a : Any=1 , __a : Dict=2 , __a : Dict=1 , __a : str=False , __a : str=None , **__a : Optional[Any] , ): UpperCAmelCase_ = vocab_size UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = hidden_size UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads # for backward compatibility if num_key_value_heads is None: UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = num_key_value_heads UpperCAmelCase_ = hidden_act UpperCAmelCase_ = initializer_range UpperCAmelCase_ = rms_norm_eps UpperCAmelCase_ = pretraining_tp UpperCAmelCase_ = use_cache UpperCAmelCase_ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def _lowercase (self : List[str] ): if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, " f"""got {self.rope_scaling}""" ) UpperCAmelCase_ = self.rope_scaling.get("type" , __a ) UpperCAmelCase_ = self.rope_scaling.get("factor" , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
1
0
'''simple docstring''' import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowerCamelCase : Optional[int] = logging.get_logger(__name__) def _lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : int ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =WavaVecaForSequenceClassification.from_pretrained(snake_case_ , config=snake_case_ ) _SCREAMING_SNAKE_CASE =downstream_dict['projector.weight'] _SCREAMING_SNAKE_CASE =downstream_dict['projector.bias'] _SCREAMING_SNAKE_CASE =downstream_dict['model.post_net.linear.weight'] _SCREAMING_SNAKE_CASE =downstream_dict['model.post_net.linear.bias'] return model def _lowerCAmelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =WavaVecaForAudioFrameClassification.from_pretrained(snake_case_ , config=snake_case_ ) _SCREAMING_SNAKE_CASE =downstream_dict['model.linear.weight'] _SCREAMING_SNAKE_CASE =downstream_dict['model.linear.bias'] return model def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ) -> str: """simple docstring""" _SCREAMING_SNAKE_CASE =WavaVecaForXVector.from_pretrained(snake_case_ , config=snake_case_ ) _SCREAMING_SNAKE_CASE =downstream_dict['connector.weight'] _SCREAMING_SNAKE_CASE =downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): _SCREAMING_SNAKE_CASE =downstream_dict[ f"model.framelevel_feature_extractor.module.{i}.kernel.weight" ] _SCREAMING_SNAKE_CASE =downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"] _SCREAMING_SNAKE_CASE =downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] _SCREAMING_SNAKE_CASE =downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] _SCREAMING_SNAKE_CASE =downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] _SCREAMING_SNAKE_CASE =downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] _SCREAMING_SNAKE_CASE =downstream_dict['objective.W'] return model @torch.no_grad() def _lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =torch.load(snake_case_ , map_location='cpu' ) _SCREAMING_SNAKE_CASE =checkpoint['Downstream'] _SCREAMING_SNAKE_CASE =WavaVecaConfig.from_pretrained(snake_case_ ) _SCREAMING_SNAKE_CASE =WavaVecaFeatureExtractor.from_pretrained( snake_case_ , return_attention_mask=snake_case_ , do_normalize=snake_case_ ) _SCREAMING_SNAKE_CASE =hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): _SCREAMING_SNAKE_CASE =convert_classification(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('ForAudioFrameClassification' ): _SCREAMING_SNAKE_CASE =convert_diarization(snake_case_ , snake_case_ , snake_case_ ) elif arch.endswith('ForXVector' ): _SCREAMING_SNAKE_CASE =convert_xvector(snake_case_ , snake_case_ , snake_case_ ) else: raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" ) if hf_config.use_weighted_layer_sum: _SCREAMING_SNAKE_CASE =checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(snake_case_ ) hf_model.save_pretrained(snake_case_ ) if __name__ == "__main__": lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." ) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") lowerCamelCase : List[str] = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
47
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __A ( unittest.TestCase ): def _lowercase (self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase (self : str ): UpperCAmelCase_ = 1 UpperCAmelCase_ = 3 UpperCAmelCase_ = (32, 32) UpperCAmelCase_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def _lowercase (self : int ): torch.manual_seed(0 ) UpperCAmelCase_ = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def _lowercase (self : Any ): torch.manual_seed(0 ) UpperCAmelCase_ = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def _lowercase (self : Optional[Any] ): torch.manual_seed(0 ) UpperCAmelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__a ) def _lowercase (self : Any ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] UpperCAmelCase_ = image[0, -3:, -3:, -1] UpperCAmelCase_ = image_from_tuple[0, -3:, -3:, -1] UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) UpperCAmelCase_ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase (self : Optional[int] ): UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 UpperCAmelCase_ = torch.Generator(device=__a ).manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def _lowercase (self : str ): UpperCAmelCase_ = self.dummy_cond_unet_upscale UpperCAmelCase_ = DDPMScheduler() UpperCAmelCase_ = DDIMScheduler(prediction_type="v_prediction" ) UpperCAmelCase_ = self.dummy_vae UpperCAmelCase_ = self.dummy_text_encoder UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase_ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase_ = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 UpperCAmelCase_ = unet.half() UpperCAmelCase_ = text_encoder.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) UpperCAmelCase_ = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) UpperCAmelCase_ = "A painting of a squirrel eating a burger" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images UpperCAmelCase_ = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __A ( unittest.TestCase ): def _lowercase (self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase (self : List[Any] ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1E-3 def _lowercase (self : Tuple ): UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) UpperCAmelCase_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _lowercase (self : List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) UpperCAmelCase_ = "stabilityai/stable-diffusion-x4-upscaler" UpperCAmelCase_ = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase_ = "a cat sitting on a park bench" UpperCAmelCase_ = torch.manual_seed(0 ) UpperCAmelCase_ = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) UpperCAmelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
1
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Dict = { 'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = ['AlbertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = ['AlbertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[int] = [ 'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'AlbertForMaskedLM', 'AlbertForMultipleChoice', 'AlbertForPreTraining', 'AlbertForQuestionAnswering', 'AlbertForSequenceClassification', 'AlbertForTokenClassification', 'AlbertModel', 'AlbertPreTrainedModel', 'load_tf_weights_in_albert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ 'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAlbertForMaskedLM', 'TFAlbertForMultipleChoice', 'TFAlbertForPreTraining', 'TFAlbertForQuestionAnswering', 'TFAlbertForSequenceClassification', 'TFAlbertForTokenClassification', 'TFAlbertMainLayer', 'TFAlbertModel', 'TFAlbertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Optional[Any] = [ 'FlaxAlbertForMaskedLM', 'FlaxAlbertForMultipleChoice', 'FlaxAlbertForPreTraining', 'FlaxAlbertForQuestionAnswering', 'FlaxAlbertForSequenceClassification', 'FlaxAlbertForTokenClassification', 'FlaxAlbertModel', 'FlaxAlbertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys __lowerCamelCase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
'''simple docstring''' from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class __A ( UpperCamelCase__ ): def __init__(self : int , __a : Distribution , __a : Dict=None , __a : int=None , __a : Any=0 ): UpperCAmelCase_ = 1.0 if scale is None else scale UpperCAmelCase_ = 0.0 if loc is None else loc super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] ) @property def _lowercase (self : Union[str, Any] ): return self.base_dist.mean * self.scale + self.loc @property def _lowercase (self : List[Any] ): return self.base_dist.variance * self.scale**2 @property def _lowercase (self : List[Any] ): return self.variance.sqrt() class __A ( nn.Module ): def __init__(self : Optional[int] , __a : int , __a : Dict[str, int] , __a : Callable[..., Tuple[torch.Tensor]] , **__a : List[str] ): super().__init__(**__a ) UpperCAmelCase_ = args_dim UpperCAmelCase_ = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] ) UpperCAmelCase_ = domain_map def _lowercase (self : List[str] , __a : torch.Tensor ): UpperCAmelCase_ = [proj(__a ) for proj in self.proj] return self.domain_map(*__a ) class __A ( nn.Module ): def __init__(self : Union[str, Any] , __a : List[str] ): super().__init__() UpperCAmelCase_ = function def _lowercase (self : Optional[int] , __a : List[str] , *__a : Optional[int] ): return self.function(__a , *__a ) class __A : a__ : type a__ : int a__ : Dict[str, int] def __init__(self : List[Any] , __a : int = 1 ): UpperCAmelCase_ = dim UpperCAmelCase_ = {k: dim * self.args_dim[k] for k in self.args_dim} def _lowercase (self : Any , __a : Any ): if self.dim == 1: return self.distribution_class(*__a ) else: return Independent(self.distribution_class(*__a ) , 1 ) def _lowercase (self : List[str] , __a : Union[str, Any] , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None , ): UpperCAmelCase_ = self._base_distribution(__a ) if loc is None and scale is None: return distr else: return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim ) @property def _lowercase (self : Any ): return () if self.dim == 1 else (self.dim,) @property def _lowercase (self : Dict ): return len(self.event_shape ) @property def _lowercase (self : Tuple ): return 0.0 def _lowercase (self : List[str] , __a : int ): return ParameterProjection( in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def _lowercase (self : Optional[int] , *__a : torch.Tensor ): raise NotImplementedError() @staticmethod def _lowercase (__a : torch.Tensor ): return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0 class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} a__ : type = StudentT @classmethod def _lowercase (cls : Union[str, Any] , __a : torch.Tensor , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) UpperCAmelCase_ = 2.0 + cls.squareplus(__a ) return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"loc": 1, "scale": 1} a__ : type = Normal @classmethod def _lowercase (cls : Tuple , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class __A ( UpperCamelCase__ ): a__ : Dict[str, int] = {"total_count": 1, "logits": 1} a__ : type = NegativeBinomial @classmethod def _lowercase (cls : Optional[Any] , __a : torch.Tensor , __a : torch.Tensor ): UpperCAmelCase_ = cls.squareplus(__a ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def _lowercase (self : List[str] , __a : str ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if self.dim == 1: return self.distribution_class(total_count=__a , logits=__a ) else: return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 ) def _lowercase (self : Optional[Any] , __a : int , __a : Optional[torch.Tensor] = None , __a : Optional[torch.Tensor] = None ): UpperCAmelCase_ , UpperCAmelCase_ = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
1
0
"""simple docstring""" import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" _lowerCAmelCase : Any = ConsistencyModelPipeline _lowerCAmelCase : int = UNCONDITIONAL_IMAGE_GENERATION_PARAMS _lowerCAmelCase : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt _lowerCAmelCase : List[Any] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """output_type""", """return_dict""", """callback""", """callback_steps""", ] ) @property def snake_case ( self ): """simple docstring""" snake_case = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet' , ) return unet @property def snake_case ( self ): """simple docstring""" snake_case = UNetaDModel.from_pretrained( 'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , ) return unet def snake_case ( self , lowerCAmelCase=False ): """simple docstring""" if class_cond: snake_case = self.dummy_cond_unet else: snake_case = self.dummy_uncond_unet # Default to CM multistep sampler snake_case = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case = { 'unet': unet, 'scheduler': scheduler, } return components def snake_case ( self , lowerCAmelCase , lowerCAmelCase=0 ): """simple docstring""" if str(__a ).startswith('mps' ): snake_case = torch.manual_seed(__a ) else: snake_case = torch.Generator(device=__a ).manual_seed(__a ) snake_case = { 'batch_size': 1, 'num_inference_steps': None, 'timesteps': [22, 0], 'generator': generator, 'output_type': 'np', } return inputs def snake_case ( self ): """simple docstring""" snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case = self.get_dummy_components() snake_case = ConsistencyModelPipeline(**__a ) snake_case = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_dummy_inputs(__a ) snake_case = pipe(**__a ).images assert image.shape == (1, 32, 32, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ): """simple docstring""" snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case = self.get_dummy_components(class_cond=__a ) snake_case = ConsistencyModelPipeline(**__a ) snake_case = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_dummy_inputs(__a ) snake_case = 0 snake_case = pipe(**__a ).images assert image.shape == (1, 32, 32, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.35_72, 0.62_73, 0.40_31, 0.39_61, 0.43_21, 0.57_30, 0.52_66, 0.47_80, 0.50_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ): """simple docstring""" snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case = self.get_dummy_components() snake_case = ConsistencyModelPipeline(**__a ) snake_case = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_dummy_inputs(__a ) snake_case = 1 snake_case = None snake_case = pipe(**__a ).images assert image.shape == (1, 32, 32, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def snake_case ( self ): """simple docstring""" snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case = self.get_dummy_components(class_cond=__a ) snake_case = ConsistencyModelPipeline(**__a ) snake_case = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_dummy_inputs(__a ) snake_case = 1 snake_case = None snake_case = 0 snake_case = pipe(**__a ).images assert image.shape == (1, 32, 32, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.50_04, 0.50_04, 0.49_94, 0.50_08, 0.49_76, 0.50_18, 0.49_90, 0.49_82, 0.49_87] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case ( self , lowerCAmelCase=0 , lowerCAmelCase=False , lowerCAmelCase="cpu" , lowerCAmelCase=torch.floataa , lowerCAmelCase=(1, 3, 64, 64) ): """simple docstring""" snake_case = torch.manual_seed(__a ) snake_case = { 'num_inference_steps': None, 'timesteps': [22, 0], 'class_labels': 0, 'generator': generator, 'output_type': 'np', } if get_fixed_latents: snake_case = self.get_fixed_latents(seed=__a , device=__a , dtype=__a , shape=__a ) snake_case = latents return inputs def snake_case ( self , lowerCAmelCase=0 , lowerCAmelCase="cpu" , lowerCAmelCase=torch.floataa , lowerCAmelCase=(1, 3, 64, 64) ): """simple docstring""" if type(__a ) == str: snake_case = torch.device(__a ) snake_case = torch.Generator(device=__a ).manual_seed(__a ) snake_case = randn_tensor(__a , generator=__a , device=__a , dtype=__a ) return latents def snake_case ( self ): """simple docstring""" snake_case = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case = ConsistencyModelPipeline(unet=__a , scheduler=__a ) pipe.to(torch_device=__a ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_inputs() snake_case = pipe(**__a ).images assert image.shape == (1, 64, 64, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def snake_case ( self ): """simple docstring""" snake_case = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case = ConsistencyModelPipeline(unet=__a , scheduler=__a ) pipe.to(torch_device=__a ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_inputs() snake_case = 1 snake_case = None snake_case = pipe(**__a ).images assert image.shape == (1, 64, 64, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def snake_case ( self ): """simple docstring""" snake_case = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case = ConsistencyModelPipeline(unet=__a , scheduler=__a ) pipe.to(torch_device=__a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_inputs(get_fixed_latents=__a , device=__a ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ): snake_case = pipe(**__a ).images assert image.shape == (1, 64, 64, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def snake_case ( self ): """simple docstring""" snake_case = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' ) snake_case = CMStochasticIterativeScheduler( num_train_timesteps=40 , sigma_min=0.0_02 , sigma_max=80.0 , ) snake_case = ConsistencyModelPipeline(unet=__a , scheduler=__a ) pipe.to(torch_device=__a , torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=__a ) snake_case = self.get_inputs(get_fixed_latents=__a , device=__a ) snake_case = 1 snake_case = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=__a , enable_math=__a , enable_mem_efficient=__a ): snake_case = pipe(**__a ).images assert image.shape == (1, 64, 64, 3) snake_case = image[0, -3:, -3:, -1] snake_case = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
150
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets SCREAMING_SNAKE_CASE_: Optional[Any] ='\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE_: Union[str, Any] ='\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' SCREAMING_SNAKE_CASE_: List[Any] =r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __A ( datasets.Metric ): def _lowercase (self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def _lowercase (self : Tuple , __a : Optional[int] , __a : List[Any] ): UpperCAmelCase_ = 0.0 for i, j in zip(__a , __a ): n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0 UpperCAmelCase_ = n_correct / len(__a ) return { "accuracy": accuracy, }
1
0
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( UpperCamelCase__, unittest.TestCase ): '''simple docstring''' lowerCamelCase__ = BioGptTokenizer lowerCamelCase__ = False def A_ ( self ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _lowerCamelCase : Union[str, Any] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] _lowerCamelCase : Any = dict(zip(__a , range(len(__a ) ) ) ) _lowerCamelCase : Tuple = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' ) as fp: fp.write(json.dumps(__a ) ) with open(self.merges_file , 'w' ) as fp: fp.write('\n'.join(__a ) ) def A_ ( self , lowercase ): _lowerCamelCase : str = 'lower newer' _lowerCamelCase : Optional[Any] = 'lower newer' return input_text, output_text def A_ ( self ): _lowerCamelCase : Dict = BioGptTokenizer(self.vocab_file , self.merges_file ) _lowerCamelCase : str = 'lower' _lowerCamelCase : Optional[int] = ['low', 'er</w>'] _lowerCamelCase : str = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) _lowerCamelCase : List[str] = tokens + ['<unk>'] _lowerCamelCase : Union[str, Any] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a ) @slow def A_ ( self ): _lowerCamelCase : Optional[Any] = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) _lowerCamelCase : List[str] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) _lowerCamelCase : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) _lowerCamelCase : int = tokenizer.build_inputs_with_special_tokens(__a ) _lowerCamelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
96
'''simple docstring''' from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] ) -> List[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : str=True ) -> Optional[Any]: '''simple docstring''' model.train() UpperCAmelCase_ = model(snake_case_ ) UpperCAmelCase_ = F.mse_loss(snake_case_ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : Any=False ) -> Dict: '''simple docstring''' set_seed(42 ) UpperCAmelCase_ = RegressionModel() UpperCAmelCase_ = deepcopy(snake_case_ ) UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) model.to(accelerator.device ) if sched: UpperCAmelCase_ = AdamW(params=model.parameters() , lr=1E-3 ) UpperCAmelCase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) UpperCAmelCase_ = LambdaLR(snake_case_ , lr_lambda=lambda snake_case_ : epoch**0.65 ) # Make a copy of `model` if sched: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCAmelCase_ ( snake_case_ : Any ) -> int: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Tuple ) -> str: '''simple docstring''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) # Use a single batch UpperCAmelCase_ , UpperCAmelCase_ = next(iter(snake_case_ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) else: # Sync grads step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] def lowerCAmelCase_ ( snake_case_ : Optional[int]=False , snake_case_ : str=False ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case_ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) UpperCAmelCase_ = ddp_input[torch.randperm(len(snake_case_ ) )] GradientState._reset_state() def lowerCAmelCase_ ( snake_case_ : Optional[Any]=False , snake_case_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator( split_batches=snake_case_ , dispatch_batches=snake_case_ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = get_training_setup(snake_case_ , snake_case_ ) for iteration, batch in enumerate(snake_case_ ): UpperCAmelCase_ , UpperCAmelCase_ = batch.values() # Gather the distributed inputs and targs for the base model UpperCAmelCase_ , UpperCAmelCase_ = accelerator.gather((ddp_input, ddp_target) ) UpperCAmelCase_ , UpperCAmelCase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case_ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case_ ): step_model(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" UpperCAmelCase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case_ )) if accelerator.num_processes > 1: check_model_parameters(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def lowerCAmelCase_ ( ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = RegressionDataset(length=80 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ = RegressionDataset(length=96 ) UpperCAmelCase_ = DataLoader(snake_case_ , batch_size=16 ) UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(snake_case_ , snake_case_ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if iteration < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case_ ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case_ ) if batch_num < len(snake_case_ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = Accelerator() UpperCAmelCase_ = accelerator.state if state.local_process_index == 0: print("**Test `accumulate` gradient accumulation with dataloader break**" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("**Test NOOP `no_sync` context manager**" ) test_noop_sync(snake_case_ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("**Test Distributed `no_sync` context manager**" ) test_distributed_sync(snake_case_ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(snake_case_ , snake_case_ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : Dict ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
1
0
"""simple docstring""" def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(snake_case_ ,x % y ) def __lowercase ( snake_case_ : int ,snake_case_ : int ) ->int: '''simple docstring''' return (x * y) // greatest_common_divisor(snake_case_ ,snake_case_ ) def __lowercase ( snake_case_ : int = 20 ) ->int: '''simple docstring''' __A : Tuple = 1 for i in range(1 ,n + 1 ): __A : str = lcm(snake_case_ ,snake_case_ ) return g if __name__ == "__main__": print(f'''{solution() = }''')
179
'''simple docstring''' def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(snake_case_ , x % y ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int: '''simple docstring''' return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int: '''simple docstring''' UpperCAmelCase_ = 1 for i in range(1 , n + 1 ): UpperCAmelCase_ = lcm(snake_case_ , snake_case_ ) return g if __name__ == "__main__": print(f"{solution() = }")
1
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Optional[int] = 1 A_ : List[Any] = 3 A_ : str = (32, 32) A_ : Tuple = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a ) return image @property def SCREAMING_SNAKE_CASE ( self :int ): '''simple docstring''' torch.manual_seed(0 ) A_ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , ) return model @property def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' torch.manual_seed(0 ) A_ : Any = AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) return model @property def SCREAMING_SNAKE_CASE ( self :Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) A_ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , ) return CLIPTextModel(__a ) def SCREAMING_SNAKE_CASE ( self :Any ): '''simple docstring''' A_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator A_ : List[Any] = self.dummy_cond_unet_upscale A_ : int = DDPMScheduler() A_ : Optional[Any] = DDIMScheduler(prediction_type="v_prediction" ) A_ : str = self.dummy_vae A_ : Optional[int] = self.dummy_text_encoder A_ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A_ : Optional[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : List[str] = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk A_ : str = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) A_ : Optional[int] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) A_ : List[str] = "A painting of a squirrel eating a burger" A_ : Dict = torch.Generator(device=__a ).manual_seed(0 ) A_ : Optional[Any] = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) A_ : Union[str, Any] = output.images A_ : Tuple = torch.Generator(device=__a ).manual_seed(0 ) A_ : Union[str, Any] = sd_pipe( [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__a , )[0] A_ : Optional[Any] = image[0, -3:, -3:, -1] A_ : Any = image_from_tuple[0, -3:, -3:, -1] A_ : str = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) A_ : str = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def SCREAMING_SNAKE_CASE ( self :Optional[int] ): '''simple docstring''' A_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator A_ : List[Any] = self.dummy_cond_unet_upscale A_ : Any = DDPMScheduler() A_ : List[str] = DDIMScheduler(prediction_type="v_prediction" ) A_ : List[str] = self.dummy_vae A_ : List[str] = self.dummy_text_encoder A_ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A_ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : List[Any] = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk A_ : List[Any] = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) A_ : Optional[int] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) A_ : Tuple = "A painting of a squirrel eating a burger" A_ : Tuple = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) A_ : Optional[Any] = output.images assert image.shape[0] == 2 A_ : List[Any] = torch.Generator(device=__a ).manual_seed(0 ) A_ : List[Any] = sd_pipe( [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , ) A_ : Optional[Any] = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" ) def SCREAMING_SNAKE_CASE ( self :str ): '''simple docstring''' A_ : Dict = self.dummy_cond_unet_upscale A_ : Optional[int] = DDPMScheduler() A_ : str = DDIMScheduler(prediction_type="v_prediction" ) A_ : str = self.dummy_vae A_ : Dict = self.dummy_text_encoder A_ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) A_ : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : int = Image.fromarray(np.uinta(__a ) ).convert("RGB" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 A_ : Any = unet.half() A_ : Optional[Any] = text_encoder.half() # make sure here that pndm scheduler skips prk A_ : int = StableDiffusionUpscalePipeline( unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , ) A_ : Union[str, Any] = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) A_ : List[Any] = "A painting of a squirrel eating a burger" A_ : List[str] = torch.manual_seed(0 ) A_ : Optional[Any] = sd_pipe( [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="np" , ).images A_ : Dict = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self :List[str] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' A_ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) A_ : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) A_ : Dict = "stabilityai/stable-diffusion-x4-upscaler" A_ : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() A_ : Any = "a cat sitting on a park bench" A_ : Union[str, Any] = torch.manual_seed(0 ) A_ : Tuple = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) A_ : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 1e-3 def SCREAMING_SNAKE_CASE ( self :Tuple ): '''simple docstring''' A_ : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) A_ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) A_ : Optional[int] = "stabilityai/stable-diffusion-x4-upscaler" A_ : str = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing() A_ : Tuple = "a cat sitting on a park bench" A_ : str = torch.manual_seed(0 ) A_ : Union[str, Any] = pipe( prompt=__a , image=__a , generator=__a , output_type="np" , ) A_ : str = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image ).max() < 5e-1 def SCREAMING_SNAKE_CASE ( self :List[Any] ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A_ : str = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) A_ : int = "stabilityai/stable-diffusion-x4-upscaler" A_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained( __a , torch_dtype=torch.floataa , ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A_ : List[Any] = "a cat sitting on a park bench" A_ : str = torch.manual_seed(0 ) A_ : Optional[Any] = pipe( prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="np" , ) A_ : Dict = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
300
'''simple docstring''' import os from math import logaa def lowerCAmelCase_ ( snake_case_ : str = "base_exp.txt" ) -> int: '''simple docstring''' UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(snake_case_ ) , snake_case_ ) ) ): UpperCAmelCase_ , UpperCAmelCase_ = list(map(snake_case_ , line.split("," ) ) ) if x * logaa(snake_case_ ) > largest: UpperCAmelCase_ = x * logaa(snake_case_ ) UpperCAmelCase_ = i + 1 return result if __name__ == "__main__": print(solution())
1
0
'''simple docstring''' def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : int = 0 ) -> int: lowercase_ : List[str] = right or len(snake_case_ ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(snake_case_ , snake_case_ , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
239
'''simple docstring''' import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : int ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = checkpoint UpperCAmelCase_ = {} UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"] UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"] UpperCAmelCase_ = vae_state_dict["quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["quant_conv.bias"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"] UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the decoder up blocks only UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) UpperCAmelCase_ = { layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(snake_case_ ) } for i in range(snake_case_ ): UpperCAmelCase_ = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key] if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.weight""" ) UpperCAmelCase_ = vae_state_dict.pop( f"""encoder.down.{i}.downsample.conv.bias""" ) UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) for i in range(snake_case_ ): UpperCAmelCase_ = num_up_blocks - 1 - i UpperCAmelCase_ = [ key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key ] if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict: UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.weight""" ] UpperCAmelCase_ = vae_state_dict[ f"""decoder.up.{block_id}.upsample.conv.bias""" ] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key] UpperCAmelCase_ = 2 for i in range(1 , num_mid_res_blocks + 1 ): UpperCAmelCase_ = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key] UpperCAmelCase_ = renew_vae_resnet_paths(snake_case_ ) UpperCAmelCase_ = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] UpperCAmelCase_ = renew_vae_attention_paths(snake_case_ ) UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) conv_attn_to_linear(snake_case_ ) return new_checkpoint def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) -> Dict: '''simple docstring''' UpperCAmelCase_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) UpperCAmelCase_ = io.BytesIO(r.content ) UpperCAmelCase_ = OmegaConf.load(snake_case_ ) UpperCAmelCase_ = 5_12 UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open UpperCAmelCase_ = {} with safe_open(snake_case_ , framework="pt" , device="cpu" ) as f: for key in f.keys(): UpperCAmelCase_ = f.get_tensor(snake_case_ ) else: UpperCAmelCase_ = torch.load(snake_case_ , map_location=snake_case_ )["state_dict"] # Convert the VAE model. UpperCAmelCase_ = create_vae_diffusers_config(snake_case_ , image_size=snake_case_ ) UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(snake_case_ , snake_case_ ) UpperCAmelCase_ = AutoencoderKL(**snake_case_ ) vae.load_state_dict(snake_case_ ) vae.save_pretrained(snake_case_ ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Optional[int] =argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') SCREAMING_SNAKE_CASE_: str =parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
1
0
from __future__ import annotations A : List[str] = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } class A : '''simple docstring''' def __init__(self : str , _UpperCAmelCase : dict[str, list[str]] , _UpperCAmelCase : str ) -> Union[str, Any]: """simple docstring""" lowercase__ = graph # mapping node to its parent in resulting breadth first tree lowercase__ = {} lowercase__ = source_vertex def lowerCamelCase__ (self : Dict ) -> Optional[Any]: """simple docstring""" lowercase__ = {self.source_vertex} lowercase__ = None lowercase__ = [self.source_vertex] # first in first out queue while queue: lowercase__ = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__a ) lowercase__ = vertex queue.append(__a ) def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : str ) -> Any: """simple docstring""" if target_vertex == self.source_vertex: return self.source_vertex lowercase__ = self.parent.get(__a ) if target_vertex_parent is None: lowercase__ = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(__a ) return self.shortest_path(__a ) + f'''->{target_vertex}''' if __name__ == "__main__": A : List[Any] = Graph(graph, 'G') g.breath_first_search() print(g.shortest_path('D')) print(g.shortest_path('G')) print(g.shortest_path('Foo'))
305
'''simple docstring''' import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class __A ( unittest.TestCase ): def __init__(self : str , __a : Optional[Any] , __a : Optional[Any]=13 , __a : int=30 , __a : Union[str, Any]=2 , __a : Dict=3 , __a : List[Any]=True , __a : Optional[Any]=True , __a : List[Any]=32 , __a : Any=5 , __a : str=4 , __a : Optional[int]=37 , __a : Optional[int]="gelu" , __a : List[str]=0.1 , __a : Tuple=0.1 , __a : List[str]=10 , __a : Optional[int]=0.02 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = patch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (image_size // patch_size) ** 2 UpperCAmelCase_ = num_patches + 1 def _lowercase (self : Any ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , ) return config, pixel_values def _lowercase (self : Dict , __a : Any , __a : List[Any] ): UpperCAmelCase_ = FlaxViTModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ = (self.image_size, self.image_size) UpperCAmelCase_ = (self.patch_size, self.patch_size) UpperCAmelCase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def _lowercase (self : Tuple , __a : str , __a : Any ): UpperCAmelCase_ = self.type_sequence_label_size UpperCAmelCase_ = FlaxViTForImageClassification(config=__a ) UpperCAmelCase_ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ = 1 UpperCAmelCase_ = FlaxViTForImageClassification(__a ) UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ = model(__a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Tuple = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def _lowercase (self : Any ): UpperCAmelCase_ = FlaxViTModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def _lowercase (self : Tuple ): self.config_tester.run_common_tests() def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCAmelCase_ = self._prepare_for_class(__a , __a ) UpperCAmelCase_ = model_class(__a ) @jax.jit def model_jitted(__a : Tuple , **__a : List[Any] ): return model(pixel_values=__a , **__a ) with self.subTest("JIT Enabled" ): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): UpperCAmelCase_ = model_jitted(**__a ).to_tuple() self.assertEqual(len(__a ) , len(__a ) ) for jitted_output, output in zip(__a , __a ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowercase (self : Tuple ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = model_class_name.from_pretrained("google/vit-base-patch16-224" ) UpperCAmelCase_ = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__a )
1
0
"""simple docstring""" from __future__ import annotations def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> None: if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): lowerCamelCase , lowerCamelCase = array[indexa], array[indexa] def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> None: if length > 1: lowerCamelCase = int(length / 2 ) for i in range(snake_case_ , low + middle ): comp_and_swap(snake_case_ , snake_case_ , i + middle , snake_case_ ) bitonic_merge(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) bitonic_merge(snake_case_ , low + middle , snake_case_ , snake_case_ ) def a__ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> None: if length > 1: lowerCamelCase = int(length / 2 ) bitonic_sort(snake_case_ , snake_case_ , snake_case_ , 1 ) bitonic_sort(snake_case_ , low + middle , snake_case_ , 0 ) bitonic_merge(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) if __name__ == "__main__": lowerCAmelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip() lowerCAmelCase : Union[str, Any] = [int(item.strip()) for item in user_input.split(""",""")] bitonic_sort(unsorted, 0, len(unsorted), 1) print("""\nSorted array in ascending order is: """, end="""""") print(*unsorted, sep=""", """) bitonic_merge(unsorted, 0, len(unsorted), 0) print("""Sorted array in descending order is: """, end="""""") print(*unsorted, sep=""", """)
291
'''simple docstring''' import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = 5 # Realm tok UpperCAmelCase_ = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "test", "question", "this", "is", "the", "first", "second", "third", "fourth", "fifth", "record", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_tokenizer" ) os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = os.path.join(__a , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) UpperCAmelCase_ = os.path.join(self.tmpdirname , "realm_block_records" ) os.makedirs(__a , exist_ok=__a ) def _lowercase (self : Optional[Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) ) def _lowercase (self : Any ): shutil.rmtree(self.tmpdirname ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = RealmConfig(num_block_records=self.num_block_records ) return config def _lowercase (self : List[str] ): UpperCAmelCase_ = Dataset.from_dict( { "id": ["0", "1"], "question": ["foo", "bar"], "answers": [["Foo", "Bar"], ["Bar"]], } ) return dataset def _lowercase (self : Any ): UpperCAmelCase_ = np.array( [ B"This is the first record", B"This is the second record", B"This is the third record", B"This is the fourth record", B"This is the fifth record", B"This is a longer longer longer record", ] , dtype=__a , ) return block_records def _lowercase (self : Union[str, Any] ): UpperCAmelCase_ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def _lowercase (self : int ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(len(__a ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , ) def _lowercase (self : List[Any] ): UpperCAmelCase_ = self.get_config() UpperCAmelCase_ = self.get_dummy_retriever() UpperCAmelCase_ = retriever.tokenizer UpperCAmelCase_ = np.array([0, 3, 5] , dtype="long" ) UpperCAmelCase_ = tokenizer(["Test question"] ).input_ids UpperCAmelCase_ = tokenizer( ["the fourth", "longer longer"] , add_special_tokens=__a , return_token_type_ids=__a , return_attention_mask=__a , ).input_ids UpperCAmelCase_ = config.reader_seq_len UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = retriever( __a , __a , answer_ids=__a , max_length=__a , return_tensors="np" ) self.assertEqual([False, True, True] , __a ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , __a ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , __a ) def _lowercase (self : Optional[Any] ): UpperCAmelCase_ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) # Test local path UpperCAmelCase_ = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) ) self.assertEqual(retriever.block_records[0] , B"This is the first record" ) # Test mocked remote path with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download: UpperCAmelCase_ = os.path.join( os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME ) UpperCAmelCase_ = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" ) self.assertEqual(retriever.block_records[0] , B"This is the first record" )
1
0
from __future__ import annotations __a = [] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" for i in range(len(snake_case_ ) ): if board[row][i] == 1: return False for i in range(len(snake_case_ ) ): if board[i][column] == 1: return False for i, j in zip(range(snake_case_, -1, -1 ), range(snake_case_, -1, -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(snake_case_, -1, -1 ), range(snake_case_, len(snake_case_ ) ) ): if board[i][j] == 1: return False return True def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" if row >= len(snake_case_ ): solution.append(snake_case_ ) printboard(snake_case_ ) print() return True for i in range(len(snake_case_ ) ): if is_safe(snake_case_, snake_case_, snake_case_ ): lowercase : Union[str, Any] = 1 solve(snake_case_, row + 1 ) lowercase : int = 0 return False def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" for i in range(len(snake_case_ ) ): for j in range(len(snake_case_ ) ): if board[i][j] == 1: print('''Q''', end=''' ''' ) else: print('''.''', end=''' ''' ) print() # n=int(input("The no. of queens")) __a = 8 __a = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
337
'''simple docstring''' from math import log from scipy.constants import Boltzmann, physical_constants SCREAMING_SNAKE_CASE_: Optional[int] =3_00 # TEMPERATURE (unit = K) def lowerCAmelCase_ ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ) -> float: '''simple docstring''' if donor_conc <= 0: raise ValueError("Donor concentration should be positive" ) elif acceptor_conc <= 0: raise ValueError("Acceptor concentration should be positive" ) elif intrinsic_conc <= 0: raise ValueError("Intrinsic concentration should be positive" ) elif donor_conc <= intrinsic_conc: raise ValueError( "Donor concentration should be greater than intrinsic concentration" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( "Acceptor concentration should be greater than intrinsic concentration" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
1
0
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __a = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __a = 50_003 __a = 50_002 @require_sentencepiece @require_tokenizers class A__ ( UpperCamelCase__ , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Union[str, Any] = PLBartTokenizer UpperCamelCase_ : Union[str, Any] = None UpperCamelCase_ : Tuple = False def _lowerCAmelCase ( self : int ) -> List[str]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase : Tuple = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : List[str] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a ) _UpperCAmelCase : Union[str, Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) _UpperCAmelCase : List[str] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) _UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual( __a , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) _UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) _UpperCAmelCase : Any = tokenizer.vocab_size _UpperCAmelCase : Dict = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 4 , __a )] self.assertListEqual(__a , ["__java__", "__python__", "__en_XX__", "<mask>"] ) _UpperCAmelCase : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" _UpperCAmelCase : Any = tokenizer(__a ).input_ids self.assertEqual( tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , ) def _lowerCAmelCase ( self : List[str] ) -> Tuple: """simple docstring""" _UpperCAmelCase : str = PLBartTokenizer(__a , language_codes="multi" , keep_accents=__a ) _UpperCAmelCase : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) _UpperCAmelCase : str = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) _UpperCAmelCase : Dict = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual( __a , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) _UpperCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) _UpperCAmelCase : List[Any] = tokenizer.vocab_size _UpperCAmelCase : List[Any] = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 7 , __a )] self.assertListEqual( __a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] ) _UpperCAmelCase : Optional[Any] = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" _UpperCAmelCase : Dict = tokenizer(__a ).input_ids self.assertEqual( tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , ) @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : str = """uclanlp/plbart-python-en_XX""" UpperCamelCase_ : List[str] = [ """def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""", """def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""", ] UpperCamelCase_ : Tuple = [ """Returns the maximum value of a b c.""", """Sums the values of a b c.""", ] UpperCamelCase_ : Optional[int] = [ 1_34, 54_52, 3_34_60, 3_34_41, 3_34_63, 3_34_65, 3_34_63, 3_34_49, 9_88, 20, 3_34_56, 19, 3_34_56, 7_71, 39, 42_58, 8_89, 33_18, 3_34_41, 3_34_63, 3_34_65, 3_34_63, 3_34_49, 24_71, 2, PYTHON_CODE, ] @classmethod def _lowerCAmelCase ( cls : Optional[int] ) -> Dict: """simple docstring""" _UpperCAmelCase : int = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" ) _UpperCAmelCase : List[str] = 1 return cls def _lowerCAmelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 ) def _lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __a ) def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" self.assertIn(__a , self.tokenizer.all_special_ids ) _UpperCAmelCase : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2] _UpperCAmelCase : Tuple = self.tokenizer.decode(__a , skip_special_tokens=__a ) _UpperCAmelCase : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a ) self.assertEqual(__a , __a ) self.assertNotIn(self.tokenizer.eos_token , __a ) def _lowerCAmelCase ( self : int ) -> Dict: """simple docstring""" _UpperCAmelCase : List[str] = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 2_0] self.assertIsInstance(src_text[0] , __a ) _UpperCAmelCase : int = 1_0 _UpperCAmelCase : int = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , __a ) self.assertEqual(len(__a ) , __a ) def _lowerCAmelCase ( self : str ) -> List[str]: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] ) def _lowerCAmelCase ( self : int ) -> str: """simple docstring""" _UpperCAmelCase : List[Any] = tempfile.mkdtemp() _UpperCAmelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__a ) _UpperCAmelCase : List[str] = PLBartTokenizer.from_pretrained(__a ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a ) @require_torch def _lowerCAmelCase ( self : str ) -> Dict: """simple docstring""" _UpperCAmelCase : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" ) _UpperCAmelCase : str = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , __a ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : List[Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) _UpperCAmelCase : List[Any] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id ) self.assertIsInstance(__a , __a ) self.assertEqual((2, 2_6) , batch.input_ids.shape ) self.assertEqual((2, 2_6) , batch.attention_mask.shape ) _UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __a ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def _lowerCAmelCase ( self : Optional[int] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" ) _UpperCAmelCase : List[str] = self.tokenizer( text_target=self.tgt_text , padding=__a , truncation=__a , max_length=1_0 , return_tensors="pt" ) _UpperCAmelCase : Optional[int] = targets["input_ids"] _UpperCAmelCase : Dict = shift_tokens_right(__a , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def _lowerCAmelCase ( self : List[Any] ) -> Dict: """simple docstring""" _UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" ) self.assertEqual( nested_simplify(__a ) , { # A, test, EOS, en_XX "input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]], "attention_mask": [[1, 1, 1, 1]], # java "forced_bos_token_id": 5_0_0_0_1, } , )
145
'''simple docstring''' import math def lowerCAmelCase_ ( ) -> None: '''simple docstring''' UpperCAmelCase_ = input("Enter message: " ) UpperCAmelCase_ = int(input(f"""Enter key [2-{len(snake_case_ ) - 1}]: """ ) ) UpperCAmelCase_ = input("Encryption/Decryption [e/d]: " ) if mode.lower().startswith("e" ): UpperCAmelCase_ = encrypt_message(snake_case_ , snake_case_ ) elif mode.lower().startswith("d" ): UpperCAmelCase_ = decrypt_message(snake_case_ , snake_case_ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f"""Output:\n{text + "|"}""" ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = [""] * key for col in range(snake_case_ ): UpperCAmelCase_ = col while pointer < len(snake_case_ ): cipher_text[col] += message[pointer] pointer += key return "".join(snake_case_ ) def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : str ) -> str: '''simple docstring''' UpperCAmelCase_ = math.ceil(len(snake_case_ ) / key ) UpperCAmelCase_ = key UpperCAmelCase_ = (num_cols * num_rows) - len(snake_case_ ) UpperCAmelCase_ = [""] * num_cols UpperCAmelCase_ = 0 UpperCAmelCase_ = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase_ = 0 row += 1 return "".join(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
1
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def _lowerCAmelCase ( _UpperCamelCase : int ) -> List[str]: """simple docstring""" if "img_encoder.pos_embed" in name: _SCREAMING_SNAKE_CASE =name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: _SCREAMING_SNAKE_CASE =name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: _SCREAMING_SNAKE_CASE =name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: _SCREAMING_SNAKE_CASE =name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: _SCREAMING_SNAKE_CASE =name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: _SCREAMING_SNAKE_CASE =name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: _SCREAMING_SNAKE_CASE =name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: _SCREAMING_SNAKE_CASE =name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: _SCREAMING_SNAKE_CASE =name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: _SCREAMING_SNAKE_CASE =name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: _SCREAMING_SNAKE_CASE =name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: _SCREAMING_SNAKE_CASE =name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: _SCREAMING_SNAKE_CASE =name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: _SCREAMING_SNAKE_CASE =name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: _SCREAMING_SNAKE_CASE =name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: _SCREAMING_SNAKE_CASE =name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: _SCREAMING_SNAKE_CASE =name.replace('c_fc' , 'fc1' ) if "c_proj" in name: _SCREAMING_SNAKE_CASE =name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: _SCREAMING_SNAKE_CASE =name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: _SCREAMING_SNAKE_CASE =name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: _SCREAMING_SNAKE_CASE =name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: _SCREAMING_SNAKE_CASE =name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: _SCREAMING_SNAKE_CASE =name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: _SCREAMING_SNAKE_CASE =name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def _lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[str] ) -> Union[str, Any]: """simple docstring""" for key in orig_state_dict.copy().keys(): _SCREAMING_SNAKE_CASE =orig_state_dict.pop(snake_case_ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _SCREAMING_SNAKE_CASE =key.split('.' ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =int(key_split[2] ), int(key_split[4] ) _SCREAMING_SNAKE_CASE =config.vision_config.hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE =val[:dim, :] _SCREAMING_SNAKE_CASE =val[dim : dim * 2, :] _SCREAMING_SNAKE_CASE =val[-dim:, :] else: _SCREAMING_SNAKE_CASE =val[:dim] _SCREAMING_SNAKE_CASE =val[dim : dim * 2] _SCREAMING_SNAKE_CASE =val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _SCREAMING_SNAKE_CASE =key.split('.' ) _SCREAMING_SNAKE_CASE =int(key_split[3] ) _SCREAMING_SNAKE_CASE =config.text_config.hidden_size if "weight" in key: _SCREAMING_SNAKE_CASE =val[:dim, :] _SCREAMING_SNAKE_CASE =val[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE =val[-dim:, :] else: _SCREAMING_SNAKE_CASE =val[:dim] _SCREAMING_SNAKE_CASE =val[dim : dim * 2] _SCREAMING_SNAKE_CASE =val[-dim:] else: _SCREAMING_SNAKE_CASE =rename_key(snake_case_ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): _SCREAMING_SNAKE_CASE =val.squeeze_() else: _SCREAMING_SNAKE_CASE =val return orig_state_dict def _lowerCAmelCase ( ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE ='http://images.cocodataset.org/val2017/000000039769.jpg' _SCREAMING_SNAKE_CASE =Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any]="groupvit-gcc-yfcc" , _UpperCamelCase : List[str]=False ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =GroupViTConfig() _SCREAMING_SNAKE_CASE =GroupViTModel(snake_case_ ).eval() _SCREAMING_SNAKE_CASE =torch.load(snake_case_ , map_location='cpu' )['model'] _SCREAMING_SNAKE_CASE =convert_state_dict(snake_case_ , snake_case_ ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(snake_case_ ) == 0) # verify result _SCREAMING_SNAKE_CASE =CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) _SCREAMING_SNAKE_CASE =prepare_img() _SCREAMING_SNAKE_CASE =processor(text=['a photo of a cat', 'a photo of a dog'] , images=snake_case_ , padding=snake_case_ , return_tensors='pt' ) with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(**snake_case_ ) if model_name == "groupvit-gcc-yfcc": _SCREAMING_SNAKE_CASE =torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": _SCREAMING_SNAKE_CASE =torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(f"Model name {model_name} not supported." ) assert torch.allclose(outputs.logits_per_image , snake_case_ , atol=1E-3 ) processor.save_pretrained(snake_case_ ) model.save_pretrained(snake_case_ ) print('Successfully saved processor and model to' , snake_case_ ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(snake_case_ , organization='nielsr' ) model.push_to_hub(snake_case_ , organization='nielsr' ) if __name__ == "__main__": lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model." ) parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint") parser.add_argument( "--model_name", default="groupvit-gccy-fcc", type=str, help="Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.", ) lowerCamelCase : Optional[int] = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
47
'''simple docstring''' import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) SCREAMING_SNAKE_CASE_: Optional[int] =logging.getLogger() SCREAMING_SNAKE_CASE_: Union[str, Any] =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __A ( UpperCamelCase__ ): def _lowercase (self : Optional[Any] , __a : str ): os.makedirs(__a , exist_ok=__a ) UpperCAmelCase_ = {"source": "What is love ?", "target": "life"} UpperCAmelCase_ = {"train": 12, "val": 2, "test": 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: UpperCAmelCase_ = "\n".join([contents[field]] * n_lines[split] ) with open(os.path.join(__a , f"""{split}.{field}""" ) , "w" ) as f: f.write(__a ) def _lowercase (self : Optional[int] , __a : int , __a : str = "pytorch" ): UpperCAmelCase_ = self.get_auto_remove_tmp_dir() UpperCAmelCase_ = os.path.join(__a , "output" ) UpperCAmelCase_ = os.path.join(__a , "data" ) self._create_dummy_data(data_dir=__a ) UpperCAmelCase_ = f""" --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ """.split() if gpus > 0: testargs.append(f"""--gpus={gpus}""" ) if is_apex_available(): testargs.append("--fp16" ) else: testargs.append("--gpus=0" ) testargs.append("--distributed_backend=ddp_cpu" ) testargs.append("--num_processes=2" ) UpperCAmelCase_ = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs execute_subprocess_async(__a , env=self.get_env() ) UpperCAmelCase_ = os.path.join(__a , "metrics.json" ) with open(__a ) as f: UpperCAmelCase_ = json.load(__a ) return result @require_torch_gpu def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu def _lowercase (self : Dict ): UpperCAmelCase_ = self._run_finetune(gpus=2 ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_gpu @require_ray def _lowercase (self : Optional[int] ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 ) @require_torch_multi_gpu @require_ray def _lowercase (self : Any ): UpperCAmelCase_ = self._run_finetune(gpus=1 , distributed_retriever="ray" ) self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
1
0